interfaces: mark `completelocalrepository` as a Protocol class...
Matt Harbison - r53397:3abf9bc1 default
@@ -1,2404 +1,2406
1 1 # repository.py - Interfaces and base classes for repositories and peers.
2 2 # coding: utf-8
3 3 #
4 4 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import annotations
10 10
11 11 import abc
12 12 import typing
13 13
14 14 from typing import (
15 15 Any,
16 16 Callable,
17 17 Collection,
18 18 Iterable,
19 19 Iterator,
20 20 Mapping,
21 21 Protocol,
22 22 Set,
23 23 )
24 24
25 25 from ..i18n import _
26 26 from .. import error
27 27
28 28 if typing.TYPE_CHECKING:
29 29 from typing import (
30 30 ByteString, # TODO: change to Buffer for 3.14
31 31 )
32 32
33 33 # Almost all mercurial modules are only imported in the type checking phase
34 34 # to avoid circular imports
35 35 from .. import (
36 36 match as matchmod,
37 37 pathutil,
38 38 util,
39 39 )
40 40 from ..utils import (
41 41 urlutil,
42 42 )
43 43
44 44 from . import dirstate as intdirstate
45 45
46 46 # TODO: make a protocol class for this
47 47 NodeConstants = Any
48 48
49 49 # TODO: create a Protocol class, since importing uimod here causes a cycle
50 50 # that confuses pytype.
51 51 Ui = Any
52 52
53 53 # TODO: make a protocol class for this
54 54 Vfs = Any
55 55
56 56 # Local repository feature string.
57 57
58 58 # Revlogs are being used for file storage.
59 59 REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
60 60 # The storage part of the repository is shared from an external source.
61 61 REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
62 62 # LFS supported for backing file storage.
63 63 REPO_FEATURE_LFS = b'lfs'
64 64 # Repository supports being stream cloned.
65 65 REPO_FEATURE_STREAM_CLONE = b'streamclone'
66 66 # Repository supports (at least) some sidedata to be stored
67 67 REPO_FEATURE_SIDE_DATA = b'side-data'
68 68 # Files storage may lack data for all ancestors.
69 69 REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'
70 70
71 71 REVISION_FLAG_CENSORED = 1 << 15
72 72 REVISION_FLAG_ELLIPSIS = 1 << 14
73 73 REVISION_FLAG_EXTSTORED = 1 << 13
74 74 REVISION_FLAG_HASCOPIESINFO = 1 << 12
75 75
76 76 REVISION_FLAGS_KNOWN = (
77 77 REVISION_FLAG_CENSORED
78 78 | REVISION_FLAG_ELLIPSIS
79 79 | REVISION_FLAG_EXTSTORED
80 80 | REVISION_FLAG_HASCOPIESINFO
81 81 )
82 82
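# A minimal illustrative sketch of how the REVISION_FLAG_* bits are combined
# and validated; the helper name `_check_flags` is hypothetical.
def _check_flags(flags: int) -> bool:
    # Reject bits outside the documented set of known revision flags.
    if flags & ~REVISION_FLAGS_KNOWN:
        raise ValueError("unknown revision flag bits: %#x" % flags)
    # A censored revision stores a tombstone instead of its real content.
    return bool(flags & REVISION_FLAG_CENSORED)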
83 83 CG_DELTAMODE_STD = b'default'
84 84 CG_DELTAMODE_PREV = b'previous'
85 85 CG_DELTAMODE_FULL = b'fulltext'
86 86 CG_DELTAMODE_P1 = b'p1'
87 87
88 88
89 89 ## Cache related constants:
90 90 #
91 91 # Used to control which cache should be warmed in a repo.updatecaches(…) call.
92 92
93 93 # Warm branchmaps for all known repoview filter-levels
94 94 CACHE_BRANCHMAP_ALL = b"branchmap-all"
95 95 # Warm branchmaps of the repoview filter-level used by the server
96 96 CACHE_BRANCHMAP_SERVED = b"branchmap-served"
97 97 # Warm internal changelog cache (eg: persistent nodemap)
98 98 CACHE_CHANGELOG_CACHE = b"changelog-cache"
99 99 # Check if a branchmap can use the "pure topo" mode
100 100 CACHE_BRANCHMAP_DETECT_PURE_TOPO = b"branchmap-detect-pure-topo"
101 101 # Warm full manifest cache
102 102 CACHE_FULL_MANIFEST = b"full-manifest"
103 103 # Warm file-node-tags cache
104 104 CACHE_FILE_NODE_TAGS = b"file-node-tags"
105 105 # Warm internal manifestlog cache (eg: persistent nodemap)
106 106 CACHE_MANIFESTLOG_CACHE = b"manifestlog-cache"
107 107 # Warm rev branch cache
108 108 CACHE_REV_BRANCH = b"rev-branch-cache"
109 109 # Warm tags' cache for the default repoview
110 110 CACHE_TAGS_DEFAULT = b"tags-default"
111 111 # Warm tags' cache for repoview's filter-level used by server
112 112 CACHE_TAGS_SERVED = b"tags-served"
113 113
114 114 # the caches to warm by default after a simple transaction
115 115 # (this is a mutable set to let extensions update it)
116 116 CACHES_DEFAULT = {
117 117 CACHE_BRANCHMAP_SERVED,
118 118 }
119 119
120 120 # the caches to warm when warming all of them
121 121 # (this is a mutable set to let extensions update it)
122 122 CACHES_ALL = {
123 123 CACHE_BRANCHMAP_SERVED,
124 124 CACHE_BRANCHMAP_ALL,
125 125 CACHE_BRANCHMAP_DETECT_PURE_TOPO,
126 126 CACHE_REV_BRANCH,
127 127 CACHE_CHANGELOG_CACHE,
128 128 CACHE_FILE_NODE_TAGS,
129 129 CACHE_FULL_MANIFEST,
130 130 CACHE_MANIFESTLOG_CACHE,
131 131 CACHE_TAGS_DEFAULT,
132 132 CACHE_TAGS_SERVED,
133 133 }
134 134
135 135 # the caches to warm by default after a clone
136 136 # (this is a mutable set to let extensions update it)
137 137 CACHES_POST_CLONE = CACHES_ALL.copy()
138 138 CACHES_POST_CLONE.discard(CACHE_FILE_NODE_TAGS)
139 139 CACHES_POST_CLONE.discard(CACHE_REV_BRANCH)
140 140
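# A minimal illustrative sketch: because the CACHES_* collections above are
# plain mutable sets, an extension can register its own cache identifier so
# that repo.updatecaches() warms it as well.  The constant and helper below
# are hypothetical.
CACHE_EXAMPLE_EXTENSION = b"example-extension-cache"

def _register_example_cache() -> None:
    CACHES_ALL.add(CACHE_EXAMPLE_EXTENSION)
    CACHES_DEFAULT.add(CACHE_EXAMPLE_EXTENSION)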
141 141
142 142 class _ipeerconnection(Protocol):
143 143 """Represents a "connection" to a repository.
144 144
145 145 This is the base interface for representing a connection to a repository.
146 146 It holds basic properties and methods applicable to all peer types.
147 147
148 148 This is not a complete interface definition and should not be used
149 149 outside of this module.
150 150 """
151 151
152 152 ui: Ui
153 153 """ui.ui instance"""
154 154
155 155 path: urlutil.path | None
156 156 """a urlutil.path instance or None"""
157 157
158 158 @abc.abstractmethod
159 159 def url(self):
160 160 """Returns a URL string representing this peer.
161 161
162 162 Currently, implementations expose the raw URL used to construct the
163 163 instance. It may contain credentials as part of the URL. The
164 164 expectations of the value aren't well-defined and this could lead to
165 165 data leakage.
166 166
167 167 TODO audit/clean consumers and more clearly define the contents of this
168 168 value.
169 169 """
170 170
171 171 @abc.abstractmethod
172 172 def local(self):
173 173 """Returns a local repository instance.
174 174
175 175 If the peer represents a local repository, returns an object that
176 176 can be used to interface with it. Otherwise returns ``None``.
177 177 """
178 178
179 179 @abc.abstractmethod
180 180 def canpush(self):
181 181 """Returns a boolean indicating if this peer can be pushed to."""
182 182
183 183 @abc.abstractmethod
184 184 def close(self):
185 185 """Close the connection to this peer.
186 186
187 187 This is called when the peer will no longer be used. Resources
188 188 associated with the peer should be cleaned up.
189 189 """
190 190
191 191
192 192 class ipeercapabilities(Protocol):
193 193 """Peer sub-interface related to capabilities."""
194 194
195 195 @abc.abstractmethod
196 196 def capable(self, name):
197 197 """Determine support for a named capability.
198 198
199 199 Returns ``False`` if capability not supported.
200 200
201 201 Returns ``True`` if boolean capability is supported. Returns a string
202 202 if capability support is non-boolean.
203 203
204 204 Capability strings may or may not map to wire protocol capabilities.
205 205 """
206 206
207 207 @abc.abstractmethod
208 208 def requirecap(self, name, purpose):
209 209 """Require a capability to be present.
210 210
211 211 Raises a ``CapabilityError`` if the capability isn't present.
212 212 """
213 213
214 214
215 215 class ipeercommands(Protocol):
216 216 """Client-side interface for communicating over the wire protocol.
217 217
218 218 This interface is used as a gateway to the Mercurial wire protocol.
219 219 Methods commonly call wire protocol commands of the same name.
220 220 """
221 221
222 222 @abc.abstractmethod
223 223 def branchmap(self):
224 224 """Obtain heads in named branches.
225 225
226 226 Returns a dict mapping branch name to an iterable of nodes that are
227 227 heads on that branch.
228 228 """
229 229
230 230 @abc.abstractmethod
231 231 def capabilities(self):
232 232 """Obtain capabilities of the peer.
233 233
234 234 Returns a set of string capabilities.
235 235 """
236 236
237 237 @abc.abstractmethod
238 238 def get_cached_bundle_inline(self, path):
239 239 """Retrieve a clonebundle across the wire.
240 240
241 241 Returns a chunkbuffer
242 242 """
243 243
244 244 @abc.abstractmethod
245 245 def clonebundles(self):
246 246 """Obtains the clone bundles manifest for the repo.
247 247
248 248 Returns the manifest as unparsed bytes.
249 249 """
250 250
251 251 @abc.abstractmethod
252 252 def debugwireargs(self, one, two, three=None, four=None, five=None):
253 253 """Used to facilitate debugging of arguments passed over the wire."""
254 254
255 255 @abc.abstractmethod
256 256 def getbundle(self, source, **kwargs):
257 257 """Obtain remote repository data as a bundle.
258 258
259 259 This command is how the bulk of repository data is transferred from
260 260 the peer to the local repository
261 261
262 262 Returns a generator of bundle data.
263 263 """
264 264
265 265 @abc.abstractmethod
266 266 def heads(self):
267 267 """Determine all known head revisions in the peer.
268 268
269 269 Returns an iterable of binary nodes.
270 270 """
271 271
272 272 @abc.abstractmethod
273 273 def known(self, nodes):
274 274 """Determine whether multiple nodes are known.
275 275
276 276 Accepts an iterable of nodes whose presence to check for.
277 277
278 278 Returns an iterable of booleans indicating whether the corresponding node
279 279 at that index is known to the peer.
280 280 """
281 281
282 282 @abc.abstractmethod
283 283 def listkeys(self, namespace):
284 284 """Obtain all keys in a pushkey namespace.
285 285
286 286 Returns an iterable of key names.
287 287 """
288 288
289 289 @abc.abstractmethod
290 290 def lookup(self, key):
291 291 """Resolve a value to a known revision.
292 292
293 293 Returns a binary node of the resolved revision on success.
294 294 """
295 295
296 296 @abc.abstractmethod
297 297 def pushkey(self, namespace, key, old, new):
298 298 """Set a value using the ``pushkey`` protocol.
299 299
300 300 Arguments correspond to the pushkey namespace and key to operate on and
301 301 the old and new values for that key.
302 302
303 303 Returns a string with the peer result. The value inside varies by the
304 304 namespace.
305 305 """
306 306
307 307 @abc.abstractmethod
308 308 def stream_out(self):
309 309 """Obtain streaming clone data.
310 310
311 311 Successful result should be a generator of data chunks.
312 312 """
313 313
314 314 @abc.abstractmethod
315 315 def unbundle(self, bundle, heads, url):
316 316 """Transfer repository data to the peer.
317 317
318 318 This is how the bulk of data during a push is transferred.
319 319
320 320 Returns the integer number of heads added to the peer.
321 321 """
322 322
323 323
324 324 class ipeerlegacycommands(Protocol):
325 325 """Interface for implementing support for legacy wire protocol commands.
326 326
327 327 Wire protocol commands transition to legacy status when they are no longer
328 328 used by modern clients. To facilitate identifying which commands are
329 329 legacy, the interfaces are split.
330 330 """
331 331
332 332 @abc.abstractmethod
333 333 def between(self, pairs):
334 334 """Obtain nodes between pairs of nodes.
335 335
336 336 ``pairs`` is an iterable of node pairs.
337 337
338 338 Returns an iterable of iterables of nodes corresponding to each
339 339 requested pair.
340 340 """
341 341
342 342 @abc.abstractmethod
343 343 def branches(self, nodes):
344 344 """Obtain ancestor changesets of specific nodes back to a branch point.
345 345
346 346 For each requested node, the peer finds the first ancestor node that is
347 347 a DAG root or is a merge.
348 348
349 349 Returns an iterable of iterables with the resolved values for each node.
350 350 """
351 351
352 352 @abc.abstractmethod
353 353 def changegroup(self, nodes, source):
354 354 """Obtain a changegroup with data for descendants of specified nodes."""
355 355
356 356 @abc.abstractmethod
357 357 def changegroupsubset(self, bases, heads, source):
358 358 pass
359 359
360 360
361 361 class ipeercommandexecutor(Protocol):
362 362 """Represents a mechanism to execute remote commands.
363 363
364 364 This is the primary interface for requesting that wire protocol commands
365 365 be executed. Instances of this interface are active in a context manager
366 366 and have a well-defined lifetime. When the context manager exits, all
367 367 outstanding requests are waited on.
368 368 """
369 369
370 370 @abc.abstractmethod
371 371 def callcommand(self, name, args):
372 372 """Request that a named command be executed.
373 373
374 374 Receives the command name and a dictionary of command arguments.
375 375
376 376 Returns a ``concurrent.futures.Future`` that will resolve to the
377 377 result of that command request. That exact value is left up to
378 378 the implementation and possibly varies by command.
379 379
380 380 Not all commands can coexist with other commands in an executor
381 381 instance: it depends on the underlying wire protocol transport being
382 382 used and the command itself.
383 383
384 384 Implementations MAY call ``sendcommands()`` automatically if the
385 385 requested command can not coexist with other commands in this executor.
386 386
387 387 Implementations MAY call ``sendcommands()`` automatically when the
388 388 future's ``result()`` is called. So, consumers using multiple
389 389 commands with an executor MUST ensure that ``result()`` is not called
390 390 until all command requests have been issued.
391 391 """
392 392
393 393 @abc.abstractmethod
394 394 def sendcommands(self):
395 395 """Trigger submission of queued command requests.
396 396
397 397 Not all transports submit commands as soon as they are requested to
398 398 run. When called, this method forces queued command requests to be
399 399 issued. It will no-op if all commands have already been sent.
400 400
401 401 When called, no more new commands may be issued with this executor.
402 402 """
403 403
404 404 @abc.abstractmethod
405 405 def close(self):
406 406 """Signal that this command request is finished.
407 407
408 408 When called, no more new commands may be issued. All outstanding
409 409 commands that have previously been issued are waited on before
410 410 returning. This not only includes waiting for the futures to resolve,
411 411 but also waiting for all response data to arrive. In other words,
412 412 calling this waits for all on-wire state for issued command requests
413 413 to finish.
414 414
415 415 When used as a context manager, this method is called when exiting the
416 416 context manager.
417 417
418 418 This method may call ``sendcommands()`` if there are buffered commands.
419 419 """
420 420
421 421
422 422 class ipeerrequests(Protocol):
423 423 """Interface for executing commands on a peer."""
424 424
425 425 limitedarguments: bool
426 426 """True if the peer cannot receive large argument value for commands."""
427 427
428 428 @abc.abstractmethod
429 429 def commandexecutor(self):
430 430 """A context manager that resolves to an ipeercommandexecutor.
431 431
432 432 The object this resolves to can be used to issue command requests
433 433 to the peer.
434 434
435 435 Callers should call its ``callcommand`` method to issue command
436 436 requests.
437 437
438 438 A new executor should be obtained for each distinct set of commands
439 439 (possibly just a single command) that the consumer wants to execute
440 440 as part of a single operation or round trip. This is because some
441 441 peers are half-duplex and/or don't support persistent connections.
442 442 e.g. in the case of HTTP peers, commands sent to an executor represent
443 443 a single HTTP request. While some peers may support multiple command
444 444 sends over the wire per executor, consumers need to code to the least
445 445 capable peer. So it should be assumed that command executors buffer
446 446 called commands until they are told to send them and that each
447 447 command executor could result in a new connection or wire-level request
448 448 being issued.
449 449 """
450 450
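# A minimal usage sketch of the executor pattern described above, assuming
# `remote` conforms to `ipeerrequests`; the helper name is hypothetical.  Per
# the callcommand() contract, both commands are requested before any result()
# is awaited.
def _fetch_heads_and_phases(remote):
    with remote.commandexecutor() as e:
        f_heads = e.callcommand(b'heads', {})
        f_phases = e.callcommand(b'listkeys', {b'namespace': b'phases'})
        return f_heads.result(), f_phases.result()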
451 451
452 452 class peer(_ipeerconnection, ipeercapabilities, ipeerrequests, Protocol):
453 453 """Unified interface for peer repositories.
454 454
455 455 All peer instances must conform to this interface.
456 456 """
457 457
458 458 limitedarguments: bool = False
459 459
460 460 def __init__(self, ui, path=None, remotehidden=False):
461 461 self.ui = ui
462 462 self.path = path
463 463
464 464 def capable(self, name):
465 465 # TODO: this class should maybe subclass ipeercommands too, otherwise it
466 466 # is assuming whatever uses this as a mixin also has this interface.
467 467 caps = self.capabilities() # pytype: disable=attribute-error
468 468 if name in caps:
469 469 return True
470 470
471 471 name = b'%s=' % name
472 472 for cap in caps:
473 473 if cap.startswith(name):
474 474 return cap[len(name) :]
475 475
476 476 return False
477 477
478 478 def requirecap(self, name, purpose):
479 479 if self.capable(name):
480 480 return
481 481
482 482 raise error.CapabilityError(
483 483 _(
484 484 b'cannot %s; remote repository does not support the '
485 485 b'\'%s\' capability'
486 486 )
487 487 % (purpose, name)
488 488 )
489 489
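# A minimal usage sketch, assuming `remote` conforms to the `peer` interface
# above: boolean capabilities come back as True/False, while parameterized
# capabilities ("name=value" on the wire) come back as the value bytes.  The
# helper name is hypothetical.
def _describe_bundle2(remote):
    remote.requirecap(b'getbundle', _(b'look up remote changes'))
    blob = remote.capable(b'bundle2')
    return blob if isinstance(blob, bytes) else b''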
490 490
491 491 class iverifyproblem(Protocol):
492 492 """Represents a problem with the integrity of the repository.
493 493
494 494 Instances of this interface are emitted to describe an integrity issue
495 495 with a repository (e.g. corrupt storage, missing data, etc).
496 496
497 497 Instances are essentially messages associated with severity.
498 498 """
499 499
500 500 warning: bytes | None
501 501 """Message indicating a non-fatal problem."""
502 502
503 503 error: bytes | None
504 504 """Message indicating a fatal problem."""
505 505
506 506 node: bytes | None
507 507 """Revision encountering the problem.
508 508
509 509 ``None`` means the problem doesn't apply to a single revision.
510 510 """
511 511
512 512
513 513 class irevisiondelta(Protocol):
514 514 """Represents a delta between one revision and another.
515 515
516 516 Instances convey enough information to allow a revision to be exchanged
517 517 with another repository.
518 518
519 519 Instances represent the fulltext revision data or a delta against
520 520 another revision. Therefore the ``revision`` and ``delta`` attributes
521 521 are mutually exclusive.
522 522
523 523 Typically used for changegroup generation.
524 524 """
525 525
526 526 node: bytes
527 527 """20 byte node of this revision."""
528 528
529 529 p1node: bytes
530 530 """20 byte node of 1st parent of this revision."""
531 531
532 532 p2node: bytes
533 533 """20 byte node of 2nd parent of this revision."""
534 534
535 535 # TODO: is this really optional? revlog.revlogrevisiondelta defaults to None
536 536 linknode: bytes | None
537 537 """20 byte node of the changelog revision this node is linked to."""
538 538
539 539 flags: int
540 540 """2 bytes of integer flags that apply to this revision.
541 541
542 542 This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
543 543 """
544 544
545 545 basenode: bytes
546 546 """20 byte node of the revision this data is a delta against.
547 547
548 548 ``nullid`` indicates that the revision is a full revision and not
549 549 a delta.
550 550 """
551 551
552 552 baserevisionsize: int | None
553 553 """Size of base revision this delta is against.
554 554
555 555 May be ``None`` if ``basenode`` is ``nullid``.
556 556 """
557 557
558 558 # TODO: is this really optional? (Seems possible in
559 559 # storageutil.emitrevisions()).
560 560 revision: bytes | None
561 561 """Raw fulltext of revision data for this node."""
562 562
563 563 delta: bytes | None
564 564 """Delta between ``basenode`` and ``node``.
565 565
566 566 Stored in the bdiff delta format.
567 567 """
568 568
569 569 sidedata: bytes | None
570 570 """Raw sidedata bytes for the given revision."""
571 571
572 572 protocol_flags: int
573 573 """Single byte of integer flags that can influence the protocol.
574 574
575 575 This is a bitwise composition of the ``storageutil.CG_FLAG*`` constants.
576 576 """
577 577
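# A minimal consumption sketch, assuming `rd` conforms to `irevisiondelta`:
# per the class docstring, `revision` and `delta` are mutually exclusive, so a
# consumer picks whichever payload is present.  The helper name is hypothetical.
def _payload(rd):
    if rd.revision is not None:
        return b'fulltext', rd.revision
    if rd.delta is not None:
        # The delta is in bdiff format and applies against rd.basenode.
        return b'delta', rd.delta
    # Neither payload is present: the object carries index-only data.
    return b'none', b''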
578 578
579 579 class ifilerevisionssequence(Protocol):
580 580 """Contains index data for all revisions of a file.
581 581
582 582 Types implementing this behave like lists of tuples. The index
583 583 in the list corresponds to the revision number. The values contain
584 584 index metadata.
585 585
586 586 The *null* revision (revision number -1) is always the last item
587 587 in the index.
588 588 """
589 589
590 590 @abc.abstractmethod
591 591 def __len__(self):
592 592 """The total number of revisions."""
593 593
594 594 @abc.abstractmethod
595 595 def __getitem__(self, rev):
596 596 """Returns the object having a specific revision number.
597 597
598 598 Returns an 8-tuple with the following fields:
599 599
600 600 offset+flags
601 601 Contains the offset and flags for the revision. 64-bit unsigned
602 602 integer where first 6 bytes are the offset and the next 2 bytes
603 603 are flags. The offset can be 0 if it is not used by the store.
604 604 compressed size
605 605 Size of the revision data in the store. It can be 0 if it isn't
606 606 needed by the store.
607 607 uncompressed size
608 608 Fulltext size. It can be 0 if it isn't needed by the store.
609 609 base revision
610 610 Revision number of revision the delta for storage is encoded
611 611 against. -1 indicates not encoded against a base revision.
612 612 link revision
613 613 Revision number of changelog revision this entry is related to.
614 614 p1 revision
615 615 Revision number of 1st parent. -1 if no 1st parent.
616 616 p2 revision
617 617 Revision number of 2nd parent. -1 if no 2nd parent.
618 618 node
619 619 Binary node value for this revision number.
620 620
621 621 Negative values should index off the end of the sequence. ``-1``
622 622 should return the null revision. ``-2`` should return the most
623 623 recent revision.
624 624 """
625 625
626 626 @abc.abstractmethod
627 627 def __contains__(self, rev):
628 628 """Whether a revision number exists."""
629 629
630 630 @abc.abstractmethod
631 631 def insert(self, i, entry):
632 632 """Add an item to the index at specific revision."""
633 633
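# A minimal sketch of unpacking the 8-tuple documented in __getitem__ above,
# assuming `index` conforms to `ifilerevisionssequence`; the helper name is
# hypothetical.
def _entry_summary(index, rev: int):
    (offset_flags, comp_size, uncomp_size,
     base_rev, link_rev, p1_rev, p2_rev, node) = index[rev]
    offset = offset_flags >> 16    # upper 6 bytes: offset within the store
    flags = offset_flags & 0xFFFF  # lower 2 bytes: revision flags
    return offset, flags, uncomp_size, node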
634 634
635 635 class ifileindex(Protocol):
636 636 """Storage interface for index data of a single file.
637 637
638 638 File storage data is divided into index metadata and data storage.
639 639 This interface defines the index portion of the interface.
640 640
641 641 The index logically consists of:
642 642
643 643 * A mapping between revision numbers and nodes.
644 644 * DAG data (storing and querying the relationship between nodes).
645 645 * Metadata to facilitate storage.
646 646 """
647 647
648 648 nullid: bytes
649 649 """node for the null revision for use as delta base."""
650 650
651 651 @abc.abstractmethod
652 652 def __len__(self) -> int:
653 653 """Obtain the number of revisions stored for this file."""
654 654
655 655 @abc.abstractmethod
656 656 def __iter__(self) -> Iterator[int]:
657 657 """Iterate over revision numbers for this file."""
658 658
659 659 @abc.abstractmethod
660 660 def hasnode(self, node):
661 661 """Returns a bool indicating if a node is known to this store.
662 662
663 663 Implementations must only return True for full, binary node values:
664 664 hex nodes, revision numbers, and partial node matches must be
665 665 rejected.
666 666
667 667 The null node is never present.
668 668 """
669 669
670 670 @abc.abstractmethod
671 671 def revs(self, start=0, stop=None):
672 672 """Iterate over revision numbers for this file, with control."""
673 673
674 674 @abc.abstractmethod
675 675 def parents(self, node):
676 676 """Returns a 2-tuple of parent nodes for a revision.
677 677
678 678 Values will be ``nullid`` if the parent is empty.
679 679 """
680 680
681 681 @abc.abstractmethod
682 682 def parentrevs(self, rev):
683 683 """Like parents() but operates on revision numbers."""
684 684
685 685 @abc.abstractmethod
686 686 def rev(self, node):
687 687 """Obtain the revision number given a node.
688 688
689 689 Raises ``error.LookupError`` if the node is not known.
690 690 """
691 691
692 692 @abc.abstractmethod
693 693 def node(self, rev):
694 694 """Obtain the node value given a revision number.
695 695
696 696 Raises ``IndexError`` if the node is not known.
697 697 """
698 698
699 699 @abc.abstractmethod
700 700 def lookup(self, node):
701 701 """Attempt to resolve a value to a node.
702 702
703 703 Value can be a binary node, hex node, revision number, or a string
704 704 that can be converted to an integer.
705 705
706 706 Raises ``error.LookupError`` if a node could not be resolved.
707 707 """
708 708
709 709 @abc.abstractmethod
710 710 def linkrev(self, rev):
711 711 """Obtain the changeset revision number a revision is linked to."""
712 712
713 713 @abc.abstractmethod
714 714 def iscensored(self, rev):
715 715 """Return whether a revision's content has been censored."""
716 716
717 717 @abc.abstractmethod
718 718 def commonancestorsheads(self, node1, node2):
719 719 """Obtain an iterable of nodes containing heads of common ancestors.
720 720
721 721 See ``ancestor.commonancestorsheads()``.
722 722 """
723 723
724 724 @abc.abstractmethod
725 725 def descendants(self, revs):
726 726 """Obtain descendant revision numbers for a set of revision numbers.
727 727
728 728 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
729 729 """
730 730
731 731 @abc.abstractmethod
732 732 def heads(self, start=None, stop=None):
733 733 """Obtain a list of nodes that are DAG heads, with control.
734 734
735 735 The set of revisions examined can be limited by specifying
736 736 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
737 737 iterable of nodes. DAG traversal starts at earlier revision
738 738 ``start`` and iterates forward until any node in ``stop`` is
739 739 encountered.
740 740 """
741 741
742 742 @abc.abstractmethod
743 743 def children(self, node):
744 744 """Obtain nodes that are children of a node.
745 745
746 746 Returns a list of nodes.
747 747 """
748 748
749 749
750 750 class ifiledata(Protocol):
751 751 """Storage interface for data storage of a specific file.
752 752
753 753 This complements ``ifileindex`` and provides an interface for accessing
754 754 data for a tracked file.
755 755 """
756 756
757 757 @abc.abstractmethod
758 758 def size(self, rev):
759 759 """Obtain the fulltext size of file data.
760 760
761 761 Any metadata is excluded from size measurements.
762 762 """
763 763
764 764 @abc.abstractmethod
765 765 def revision(self, node):
766 766 """Obtain fulltext data for a node.
767 767
768 768 By default, any storage transformations are applied before the data
769 769 is returned. If ``raw`` is True, non-raw storage transformations
770 770 are not applied.
771 771
772 772 The fulltext data may contain a header containing metadata. Most
773 773 consumers should use ``read()`` to obtain the actual file data.
774 774 """
775 775
776 776 @abc.abstractmethod
777 777 def rawdata(self, node):
778 778 """Obtain raw data for a node."""
779 779
780 780 @abc.abstractmethod
781 781 def read(self, node):
782 782 """Resolve file fulltext data.
783 783
784 784 This is similar to ``revision()`` except any metadata in the data
785 785 headers is stripped.
786 786 """
787 787
788 788 @abc.abstractmethod
789 789 def renamed(self, node):
790 790 """Obtain copy metadata for a node.
791 791
792 792 Returns ``False`` if no copy metadata is stored or a 2-tuple of
793 793 (path, node) from which this revision was copied.
794 794 """
795 795
796 796 @abc.abstractmethod
797 797 def cmp(self, node, fulltext):
798 798 """Compare fulltext to another revision.
799 799
800 800 Returns True if the fulltext is different from what is stored.
801 801
802 802 This takes copy metadata into account.
803 803
804 804 TODO better document the copy metadata and censoring logic.
805 805 """
806 806
807 807 @abc.abstractmethod
808 808 def emitrevisions(
809 809 self,
810 810 nodes,
811 811 nodesorder=None,
812 812 revisiondata=False,
813 813 assumehaveparentrevisions=False,
814 814 deltamode=CG_DELTAMODE_STD,
815 815 ):
816 816 """Produce ``irevisiondelta`` for revisions.
817 817
818 818 Given an iterable of nodes, emits objects conforming to the
819 819 ``irevisiondelta`` interface that describe revisions in storage.
820 820
821 821 This method is a generator.
822 822
823 823 The input nodes may be unordered. Implementations must ensure that a
824 824 node's parents are emitted before the node itself. Transitively, this
825 825 means that a node may only be emitted once all its ancestors in
826 826 ``nodes`` have also been emitted.
827 827
828 828 By default, emits "index" data (the ``node``, ``p1node``, and
829 829 ``p2node`` attributes). If ``revisiondata`` is set, revision data
830 830 will also be present on the emitted objects.
831 831
832 832 With default argument values, implementations can choose to emit
833 833 either fulltext revision data or a delta. When emitting deltas,
834 834 implementations must consider whether the delta's base revision
835 835 fulltext is available to the receiver.
836 836
837 837 The base revision fulltext is guaranteed to be available if any of
838 838 the following are met:
839 839
840 840 * Its fulltext revision was emitted by this method call.
841 841 * A delta for that revision was emitted by this method call.
842 842 * ``assumehaveparentrevisions`` is True and the base revision is a
843 843 parent of the node.
844 844
845 845 ``nodesorder`` can be used to control the order that revisions are
846 846 emitted. By default, revisions can be reordered as long as they are
847 847 in DAG topological order (see above). If the value is ``nodes``,
848 848 the iteration order from ``nodes`` should be used. If the value is
849 849 ``storage``, then the native order from the backing storage layer
850 850 is used. (Not all storage layers will have strong ordering and behavior
851 851 of this mode is storage-dependent.) ``nodes`` ordering can force
852 852 revisions to be emitted before their ancestors, so consumers should
853 853 use it with care.
854 854
855 855 The ``linknode`` attribute on the returned ``irevisiondelta`` may not
856 856 be set and it is the caller's responsibility to resolve it, if needed.
857 857
858 858 If ``deltamode`` is CG_DELTAMODE_PREV and revision data is requested,
859 859 all revision data should be emitted as deltas against the revision
860 860 emitted just prior. The initial revision should be a delta against its
861 861 1st parent.
862 862 """
863 863
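# A minimal consumption sketch, assuming `store` conforms to `ifiledata` and
# `nodes` is an iterable of binary nodes it knows about; the helper name is
# hypothetical.  Because parents are emitted before children (see
# emitrevisions() above), each entry can be applied on top of earlier ones.
def _collect_deltas(store, nodes):
    out = []
    for rd in store.emitrevisions(nodes, revisiondata=True):
        payload = rd.revision if rd.revision is not None else rd.delta
        out.append((rd.node, rd.basenode, payload))
    return out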
864 864
865 865 class ifilemutation(Protocol):
866 866 """Storage interface for mutation events of a tracked file."""
867 867
868 868 @abc.abstractmethod
869 869 def add(self, filedata, meta, transaction, linkrev, p1, p2):
870 870 """Add a new revision to the store.
871 871
872 872 Takes file data, dictionary of metadata, a transaction, linkrev,
873 873 and parent nodes.
874 874
875 875 Returns the node that was added.
876 876
877 877 May no-op if a revision matching the supplied data is already stored.
878 878 """
879 879
880 880 @abc.abstractmethod
881 881 def addrevision(
882 882 self,
883 883 revisiondata,
884 884 transaction,
885 885 linkrev,
886 886 p1,
887 887 p2,
888 888 node=None,
889 889 flags=0,
890 890 cachedelta=None,
891 891 ):
892 892 """Add a new revision to the store and return its number.
893 893
894 894 This is similar to ``add()`` except it operates at a lower level.
895 895
896 896 The data passed in already contains a metadata header, if any.
897 897
898 898 ``node`` and ``flags`` can be used to define the expected node and
899 899 the flags to use with storage. ``flags`` is a bitwise value composed
900 900 of the various ``REVISION_FLAG_*`` constants.
901 901
902 902 ``add()`` is usually called when adding files from e.g. the working
903 903 directory. ``addrevision()`` is often called by ``add()`` and for
904 904 scenarios where revision data has already been computed, such as when
905 905 applying raw data from a peer repo.
906 906 """
907 907
908 908 @abc.abstractmethod
909 909 def addgroup(
910 910 self,
911 911 deltas,
912 912 linkmapper,
913 913 transaction,
914 914 addrevisioncb=None,
915 915 duplicaterevisioncb=None,
916 916 maybemissingparents=False,
917 917 ):
918 918 """Process a series of deltas for storage.
919 919
920 920 ``deltas`` is an iterable of 7-tuples of
921 921 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
922 922 to add.
923 923
924 924 The ``delta`` field contains ``mpatch`` data to apply to a base
925 925 revision, identified by ``deltabase``. The base node can be
926 926 ``nullid``, in which case the header from the delta can be ignored
927 927 and the delta used as the fulltext.
928 928
929 929 ``alwayscache`` instructs the lower layers to cache the content of the
930 930 newly added revision, even if it needs to be explicitly computed.
931 931 This used to be the default when ``addrevisioncb`` was provided up to
932 932 Mercurial 5.8.
933 933
934 934 ``addrevisioncb`` should be called for each new rev as it is committed.
935 935 ``duplicaterevisioncb`` should be called for all revs with a
936 936 pre-existing node.
937 937
938 938 ``maybemissingparents`` is a bool indicating whether the incoming
939 939 data may reference parents/ancestor revisions that aren't present.
940 940 This flag is set when receiving data into a "shallow" store that
941 941 doesn't hold all history.
942 942
943 943 Returns a list of nodes that were processed. A node will be in the list
944 944 even if it existed in the store previously.
945 945 """
946 946
947 947 @abc.abstractmethod
948 948 def censorrevision(self, tr, node, tombstone=b''):
949 949 """Remove the content of a single revision.
950 950
951 951 The specified ``node`` will have its content purged from storage.
952 952 Future attempts to access the revision data for this node will
953 953 result in failure.
954 954
955 955 A ``tombstone`` message can optionally be stored. This message may be
956 956 displayed to users when they attempt to access the missing revision
957 957 data.
958 958
959 959 Storage backends may have stored deltas against the previous content
960 960 in this revision. As part of censoring a revision, these storage
961 961 backends are expected to rewrite any internally stored deltas such
962 962 that they no longer reference the deleted content.
963 963 """
964 964
965 965 @abc.abstractmethod
966 966 def getstrippoint(self, minlink):
967 967 """Find the minimum revision that must be stripped to strip a linkrev.
968 968
969 969 Returns a 2-tuple containing the minimum revision number and a set
970 970 of all revision numbers that would be broken by this strip.
971 971
972 972 TODO this is highly revlog centric and should be abstracted into
973 973 a higher-level deletion API. ``repair.strip()`` relies on this.
974 974 """
975 975
976 976 @abc.abstractmethod
977 977 def strip(self, minlink, transaction):
978 978 """Remove storage of items starting at a linkrev.
979 979
980 980 This uses ``getstrippoint()`` to determine the first node to remove.
981 981 Then it effectively truncates storage for all revisions after that.
982 982
983 983 TODO this is highly revlog centric and should be abstracted into a
984 984 higher-level deletion API.
985 985 """
986 986
987 987
988 988 class ifilestorage(ifileindex, ifiledata, ifilemutation, Protocol):
989 989 """Complete storage interface for a single tracked file."""
990 990
991 991 @abc.abstractmethod
992 992 def files(self):
993 993 """Obtain paths that are backing storage for this file.
994 994
995 995 TODO this is used heavily by verify code and there should probably
996 996 be a better API for that.
997 997 """
998 998
999 999 @abc.abstractmethod
1000 1000 def storageinfo(
1001 1001 self,
1002 1002 exclusivefiles=False,
1003 1003 sharedfiles=False,
1004 1004 revisionscount=False,
1005 1005 trackedsize=False,
1006 1006 storedsize=False,
1007 1007 ):
1008 1008 """Obtain information about storage for this file's data.
1009 1009
1010 1010 Returns a dict describing storage for this tracked path. The keys
1011 1011 in the dict map to arguments of the same name. The arguments are bools
1012 1012 indicating whether to calculate and obtain that data.
1013 1013
1014 1014 exclusivefiles
1015 1015 Iterable of (vfs, path) describing files that are exclusively
1016 1016 used to back storage for this tracked path.
1017 1017
1018 1018 sharedfiles
1019 1019 Iterable of (vfs, path) describing files that are used to back
1020 1020 storage for this tracked path. Those files may also provide storage
1021 1021 for other stored entities.
1022 1022
1023 1023 revisionscount
1024 1024 Number of revisions available for retrieval.
1025 1025
1026 1026 trackedsize
1027 1027 Total size in bytes of all tracked revisions. This is a sum of the
1028 1028 length of the fulltext of all revisions.
1029 1029
1030 1030 storedsize
1031 1031 Total size in bytes used to store data for all tracked revisions.
1032 1032 This is commonly less than ``trackedsize`` due to internal usage
1033 1033 of deltas rather than fulltext revisions.
1034 1034
1035 1035 Not all storage backends may support all queries or have a reasonable
1036 1036 value to use. In that case, the value should be set to ``None`` and
1037 1037 callers are expected to handle this special value.
1038 1038 """
1039 1039
1040 1040 @abc.abstractmethod
1041 1041 def verifyintegrity(self, state) -> Iterable[iverifyproblem]:
1042 1042 """Verifies the integrity of file storage.
1043 1043
1044 1044 ``state`` is a dict holding state of the verifier process. It can be
1045 1045 used to communicate data between invocations of multiple storage
1046 1046 primitives.
1047 1047
1048 1048 If individual revisions cannot have their revision content resolved,
1049 1049 the method is expected to set the ``skipread`` key to a set of nodes
1050 1050 that encountered problems. If set, the method can also add the node(s)
1051 1051 to ``safe_renamed`` in order to indicate nodes that may perform the
1052 1052 rename checks with currently accessible data.
1053 1053
1054 1054 The method yields objects conforming to the ``iverifyproblem``
1055 1055 interface.
1056 1056 """
1057 1057
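# A minimal sketch, assuming `fl` conforms to `ifilestorage`: ask storageinfo()
# for a couple of optional fields and surface any problems reported by
# verifyintegrity().  The `state` dict is verifier-defined; only the two keys
# named in the docstring above are seeded here, and the helper is hypothetical.
def _quick_check(fl):
    info = fl.storageinfo(revisionscount=True, storedsize=True)
    state = {b'skipread': set(), b'safe_renamed': set()}
    problems = [p for p in fl.verifyintegrity(state) if p.error or p.warning]
    return info, problems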
1058 1058
1059 1059 class idirs(Protocol):
1060 1060 """Interface representing a collection of directories from paths.
1061 1061
1062 1062 This interface is essentially a derived data structure representing
1063 1063 directories from a collection of paths.
1064 1064 """
1065 1065
1066 1066 @abc.abstractmethod
1067 1067 def addpath(self, path):
1068 1068 """Add a path to the collection.
1069 1069
1070 1070 All directories in the path will be added to the collection.
1071 1071 """
1072 1072
1073 1073 @abc.abstractmethod
1074 1074 def delpath(self, path):
1075 1075 """Remove a path from the collection.
1076 1076
1077 1077 If the removal was the last path in a particular directory, the
1078 1078 directory is removed from the collection.
1079 1079 """
1080 1080
1081 1081 @abc.abstractmethod
1082 1082 def __iter__(self):
1083 1083 """Iterate over the directories in this collection of paths."""
1084 1084
1085 1085 @abc.abstractmethod
1086 1086 def __contains__(self, path):
1087 1087 """Whether a specific directory is in this collection."""
1088 1088
1089 1089
1090 1090 class imanifestdict(Protocol):
1091 1091 """Interface representing a manifest data structure.
1092 1092
1093 1093 A manifest is effectively a dict mapping paths to entries. Each entry
1094 1094 consists of a binary node and extra flags affecting that entry.
1095 1095 """
1096 1096
1097 1097 @abc.abstractmethod
1098 1098 def __getitem__(self, key: bytes) -> bytes:
1099 1099 """Returns the binary node value for a path in the manifest.
1100 1100
1101 1101 Raises ``KeyError`` if the path does not exist in the manifest.
1102 1102
1103 1103 Equivalent to ``self.find(path)[0]``.
1104 1104 """
1105 1105
1106 1106 @abc.abstractmethod
1107 1107 def find(self, path: bytes) -> tuple[bytes, bytes]:
1108 1108 """Returns the entry for a path in the manifest.
1109 1109
1110 1110 Returns a 2-tuple of (node, flags).
1111 1111
1112 1112 Raises ``KeyError`` if the path does not exist in the manifest.
1113 1113 """
1114 1114
1115 1115 @abc.abstractmethod
1116 1116 def __len__(self) -> int:
1117 1117 """Return the number of entries in the manifest."""
1118 1118
1119 1119 @abc.abstractmethod
1120 1120 def __nonzero__(self) -> bool:
1121 1121 """Returns True if the manifest has entries, False otherwise."""
1122 1122
1123 1123 __bool__ = __nonzero__
1124 1124
1125 1125 @abc.abstractmethod
1126 1126 def set(self, path: bytes, node: bytes, flags: bytes) -> None:
1127 1127 """Define the node value and flags for a path in the manifest.
1128 1128
1129 1129 Equivalent to __setitem__ followed by setflag, but can be more efficient.
1130 1130 """
1131 1131
1132 1132 @abc.abstractmethod
1133 1133 def __setitem__(self, path: bytes, node: bytes) -> None:
1134 1134 """Define the node value for a path in the manifest.
1135 1135
1136 1136 If the path is already in the manifest, its flags will be copied to
1137 1137 the new entry.
1138 1138 """
1139 1139
1140 1140 @abc.abstractmethod
1141 1141 def __contains__(self, path: bytes) -> bool:
1142 1142 """Whether a path exists in the manifest."""
1143 1143
1144 1144 @abc.abstractmethod
1145 1145 def __delitem__(self, path: bytes) -> None:
1146 1146 """Remove a path from the manifest.
1147 1147
1148 1148 Raises ``KeyError`` if the path is not in the manifest.
1149 1149 """
1150 1150
1151 1151 @abc.abstractmethod
1152 1152 def __iter__(self) -> Iterator[bytes]:
1153 1153 """Iterate over paths in the manifest."""
1154 1154
1155 1155 @abc.abstractmethod
1156 1156 def iterkeys(self) -> Iterator[bytes]:
1157 1157 """Iterate over paths in the manifest."""
1158 1158
1159 1159 @abc.abstractmethod
1160 1160 def keys(self) -> list[bytes]:
1161 1161 """Obtain a list of paths in the manifest."""
1162 1162
1163 1163 @abc.abstractmethod
1164 1164 def filesnotin(self, other, match=None) -> Set[bytes]:
1165 1165 """Obtain the set of paths in this manifest but not in another.
1166 1166
1167 1167 ``match`` is an optional matcher function to be applied to both
1168 1168 manifests.
1169 1169
1170 1170 Returns a set of paths.
1171 1171 """
1172 1172
1173 1173 @abc.abstractmethod
1174 1174 def dirs(self) -> pathutil.dirs:
1175 1175 """Returns an object implementing the ``idirs`` interface."""
1176 1176
1177 1177 @abc.abstractmethod
1178 1178 def hasdir(self, dir: bytes) -> bool:
1179 1179 """Returns a bool indicating if a directory is in this manifest."""
1180 1180
1181 1181 @abc.abstractmethod
1182 1182 def walk(self, match: matchmod.basematcher) -> Iterator[bytes]:
1183 1183 """Generator of paths in manifest satisfying a matcher.
1184 1184
1185 1185 If the matcher has explicit files listed and they don't exist in
1186 1186 the manifest, ``match.bad()`` is called for each missing file.
1187 1187 """
1188 1188
1189 1189 @abc.abstractmethod
1190 1190 def diff(
1191 1191 self,
1192 1192 other: Any, # TODO: 'manifestdict' or (better) equivalent interface
1193 1193 match: matchmod.basematcher | None = None,
1194 1194 clean: bool = False,
1195 1195 ) -> dict[
1196 1196 bytes,
1197 1197 tuple[tuple[bytes | None, bytes], tuple[bytes | None, bytes]] | None,
1198 1198 ]:
1199 1199 """Find differences between this manifest and another.
1200 1200
1201 1201 This manifest is compared to ``other``.
1202 1202
1203 1203 If ``match`` is provided, the two manifests are filtered against this
1204 1204 matcher and only entries satisfying the matcher are compared.
1205 1205
1206 1206 If ``clean`` is True, unchanged files are included in the returned
1207 1207 object.
1208 1208
1209 1209 Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
1210 1210 the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
1211 1211 represents the node and flags for this manifest and ``(node2, flag2)``
1212 1212 are the same for the other manifest.
1213 1213 """
1214 1214
1215 1215 @abc.abstractmethod
1216 1216 def setflag(self, path: bytes, flag: bytes) -> None:
1217 1217 """Set the flag value for a given path.
1218 1218
1219 1219 Raises ``KeyError`` if the path is not already in the manifest.
1220 1220 """
1221 1221
1222 1222 @abc.abstractmethod
1223 1223 def get(self, path: bytes, default=None) -> bytes | None:
1224 1224 """Obtain the node value for a path or a default value if missing."""
1225 1225
1226 1226 @abc.abstractmethod
1227 1227 def flags(self, path: bytes) -> bytes:
1228 1228 """Return the flags value for a path (default: empty bytestring)."""
1229 1229
1230 1230 @abc.abstractmethod
1231 1231 def copy(self) -> 'imanifestdict':
1232 1232 """Return a copy of this manifest."""
1233 1233
1234 1234 @abc.abstractmethod
1235 1235 def items(self) -> Iterator[tuple[bytes, bytes]]:
1236 1236 """Returns an iterable of (path, node) for items in this manifest."""
1237 1237
1238 1238 @abc.abstractmethod
1239 1239 def iteritems(self) -> Iterator[tuple[bytes, bytes]]:
1240 1240 """Identical to items()."""
1241 1241
1242 1242 @abc.abstractmethod
1243 1243 def iterentries(self) -> Iterator[tuple[bytes, bytes, bytes]]:
1244 1244 """Returns an iterable of (path, node, flags) for this manifest.
1245 1245
1246 1246 Similar to ``iteritems()`` except items are a 3-tuple and include
1247 1247 flags.
1248 1248 """
1249 1249
1250 1250 @abc.abstractmethod
1251 1251 def text(self) -> ByteString:
1252 1252 """Obtain the raw data representation for this manifest.
1253 1253
1254 1254 Result is used to create a manifest revision.
1255 1255 """
1256 1256
1257 1257 @abc.abstractmethod
1258 1258 def fastdelta(
1259 1259 self, base: ByteString, changes: Iterable[tuple[bytes, bool]]
1260 1260 ) -> tuple[ByteString, ByteString]:
1261 1261 """Obtain a delta between this manifest and another given changes.
1262 1262
1263 1263 ``base`` is the raw data representation for another manifest.
1264 1264
1265 1265 ``changes`` is an iterable of ``(path, to_delete)``.
1266 1266
1267 1267 Returns a 2-tuple containing ``bytearray(self.text())`` and the
1268 1268 delta between ``base`` and this manifest.
1269 1269
1270 1270 If this manifest implementation can't support ``fastdelta()``,
1271 1271 raise ``mercurial.manifest.FastdeltaUnavailable``.
1272 1272 """
1273 1273
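# A minimal usage sketch, assuming `m1` and `m2` conform to `imanifestdict`:
# diff() is keyed by path, the first pair describes this manifest and the
# second the other, and a missing side is reported with a ``None`` node.  The
# helper name is hypothetical.
def _summarize_diff(m1, m2):
    summary = {}
    for path, ((n1, fl1), (n2, fl2)) in m1.diff(m2).items():
        if n1 is None:
            summary[path] = b'only in other'
        elif n2 is None:
            summary[path] = b'only in this manifest'
        else:
            summary[path] = b'modified'
    return summary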
1274 1274
1275 1275 class imanifestrevisionbase(Protocol):
1276 1276 """Base interface representing a single revision of a manifest.
1277 1277
1278 1278 Should not be used as a primary interface: should always be inherited
1279 1279 as part of a larger interface.
1280 1280 """
1281 1281
1282 1282 @abc.abstractmethod
1283 1283 def copy(self):
1284 1284 """Obtain a copy of this manifest instance.
1285 1285
1286 1286 Returns an object conforming to the ``imanifestrevisionwritable``
1287 1287 interface. The instance will be associated with the same
1288 1288 ``imanifestlog`` collection as this instance.
1289 1289 """
1290 1290
1291 1291 @abc.abstractmethod
1292 1292 def read(self):
1293 1293 """Obtain the parsed manifest data structure.
1294 1294
1295 1295 The returned object conforms to the ``imanifestdict`` interface.
1296 1296 """
1297 1297
1298 1298
1299 1299 class imanifestrevisionstored(imanifestrevisionbase, Protocol):
1300 1300 """Interface representing a manifest revision committed to storage."""
1301 1301
1302 1302 @abc.abstractmethod
1303 1303 def node(self) -> bytes:
1304 1304 """The binary node for this manifest."""
1305 1305
1306 1306 parents: list[bytes]
1307 1307 """List of binary nodes that are parents for this manifest revision."""
1308 1308
1309 1309 @abc.abstractmethod
1310 1310 def readdelta(self, shallow: bool = False):
1311 1311 """Obtain the manifest data structure representing changes from parent.
1312 1312
1313 1313 This manifest is compared to its 1st parent. A new manifest
1314 1314 representing those differences is constructed.
1315 1315
1316 1316 If `shallow` is True, this will read the delta for this directory,
1317 1317 without recursively reading subdirectory manifests. Instead, any
1318 1318 subdirectory entry will be reported as it appears in the manifest, i.e.
1319 1319 the subdirectory will be reported among files and distinguished only by
1320 1320 its 't' flag. This only applies if the underlying manifest supports it.
1321 1321
1322 1322 The returned object conforms to the ``imanifestdict`` interface.
1323 1323 """
1324 1324
1325 1325 @abc.abstractmethod
1326 1326 def read_any_fast_delta(
1327 1327 self,
1328 1328 valid_bases: Collection[int] | None = None,
1329 1329 *,
1330 1330 shallow: bool = False,
1331 1331 ):
1332 1332 """read some manifest information as fast if possible
1333 1333
1334 1334 This might return a "delta", a manifest object containing only the files
1335 1335 changed compared to another revision. The `valid_bases` argument
1336 1336 controls the set of revisions that might be used as a base.
1337 1337
1338 1338 If no delta can be retrieved quickly, a full read of the manifest will
1339 1339 be performed instead.
1340 1340
1341 1341 The function returns a tuple with two elements. The first one is the
1342 1342 delta base used (or None if a full read was performed), the second one is the
1343 1343 manifest information.
1344 1344
1345 1345 If `shallow` is True, this will read the delta for this directory,
1346 1346 without recursively reading subdirectory manifests. Instead, any
1347 1347 subdirectory entry will be reported as it appears in the manifest, i.e.
1348 1348 the subdirectory will be reported among files and distinguished only by
1349 1349 its 't' flag. This only applies if the underlying manifest supports it.
1350 1350
1351 1351 The returned object conforms to the ``imanifestdict`` interface.
1352 1352 """
1353 1353
1354 1354 @abc.abstractmethod
1355 1355 def read_delta_parents(self, *, shallow: bool = False, exact: bool = True):
1356 1356 """return a diff from this revision against both parents.
1357 1357
1358 1358 If `exact` is False, this might return a superset of the diff, containing
1359 1359 files that are actually present as is in one of the parents.
1360 1360
1361 1361 If `shallow` is True, this will read the delta for this directory,
1362 1362 without recursively reading subdirectory manifests. Instead, any
1363 1363 subdirectory entry will be reported as it appears in the manifest, i.e.
1364 1364 the subdirectory will be reported among files and distinguished only by
1365 1365 its 't' flag. This only applies if the underlying manifest supports it.
1366 1366
1367 1367 The returned object conforms to the ``imanifestdict`` interface."""
1368 1368
1369 1369 @abc.abstractmethod
1370 1370 def read_delta_new_entries(self, *, shallow: bool = False):
1371 1371 """Return a manifest containing just the entries that might be new to
1372 1372 the repository.
1373 1373
1374 1374 This is often equivalent to a diff against both parents, but without
1375 1375 guarantee. For performance reasons, it might contain more files in some cases.
1376 1376
1377 1377 If `shallow` is True, this will read the delta for this directory,
1378 1378 without recursively reading subdirectory manifests. Instead, any
1379 1379 subdirectory entry will be reported as it appears in the manifest, i.e.
1380 1380 the subdirectory will be reported among files and distinguished only by
1381 1381 its 't' flag. This only applies if the underlying manifest supports it.
1382 1382
1383 1383 The returned object conforms to the ``imanifestdict`` interface."""
1384 1384
1385 1385 @abc.abstractmethod
1386 1386 def readfast(self, shallow: bool = False):
1387 1387 """Calls either ``read()`` or ``readdelta()``.
1388 1388
1389 1389 The faster of the two options is called.
1390 1390 """
1391 1391
1392 1392 @abc.abstractmethod
1393 1393 def find(self, key: bytes) -> tuple[bytes, bytes]:
1394 1394 """Calls ``self.read().find(key)``.
1395 1395
1396 1396 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
1397 1397 """
1398 1398
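# A minimal sketch, assuming `mctx` conforms to `imanifestrevisionstored`:
# read_any_fast_delta() returns a (base, manifest) pair where `base` is None
# when a full read was performed; either way the second element conforms to
# `imanifestdict`.  The helper name is hypothetical.
def _candidate_paths(mctx):
    base, m = mctx.read_any_fast_delta()
    prefix = b'full:' if base is None else b'delta:'
    return [prefix + path for path in m.iterkeys()]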
1399 1399
1400 1400 class imanifestrevisionwritable(imanifestrevisionbase, Protocol):
1401 1401 """Interface representing a manifest revision that can be committed."""
1402 1402
1403 1403 @abc.abstractmethod
1404 1404 def write(
1405 1405 self, transaction, linkrev, p1node, p2node, added, removed, match=None
1406 1406 ):
1407 1407 """Add this revision to storage.
1408 1408
1409 1409 Takes a transaction object, the changeset revision number it will
1410 1410 be associated with, its parent nodes, and lists of added and
1411 1411 removed paths.
1412 1412
1413 1413 If match is provided, storage can choose not to inspect or write out
1414 1414 items that do not match. Storage is still required to be able to provide
1415 1415 the full manifest in the future for any directories written (these
1416 1416 manifests should not be "narrowed on disk").
1417 1417
1418 1418 Returns the binary node of the created revision.
1419 1419 """
1420 1420
1421 1421
1422 1422 class imanifeststorage(Protocol):
1423 1423 """Storage interface for manifest data."""
1424 1424
1425 1425 nodeconstants: NodeConstants
1426 1426 """nodeconstants used by the current repository."""
1427 1427
1428 1428 tree: bytes
1429 1429 """The path to the directory this manifest tracks.
1430 1430
1431 1431 The empty bytestring represents the root manifest.
1432 1432 """
1433 1433
1434 1434 index: ifilerevisionssequence
1435 1435 """An ``ifilerevisionssequence`` instance."""
1436 1436
1437 1437 opener: Vfs
1438 1438 """VFS opener to use to access underlying files used for storage.
1439 1439
1440 1440 TODO this is revlog specific and should not be exposed.
1441 1441 """
1442 1442
1443 1443 # TODO: finish type hints
1444 1444 fulltextcache: dict
1445 1445 """Dict with cache of fulltexts.
1446 1446
1447 1447 TODO this doesn't feel appropriate for the storage interface.
1448 1448 """
1449 1449
1450 1450 @abc.abstractmethod
1451 1451 def __len__(self):
1452 1452 """Obtain the number of revisions stored for this manifest."""
1453 1453
1454 1454 @abc.abstractmethod
1455 1455 def __iter__(self):
1456 1456 """Iterate over revision numbers for this manifest."""
1457 1457
1458 1458 @abc.abstractmethod
1459 1459 def rev(self, node):
1460 1460 """Obtain the revision number given a binary node.
1461 1461
1462 1462 Raises ``error.LookupError`` if the node is not known.
1463 1463 """
1464 1464
1465 1465 @abc.abstractmethod
1466 1466 def node(self, rev):
1467 1467 """Obtain the node value given a revision number.
1468 1468
1469 1469 Raises ``error.LookupError`` if the revision is not known.
1470 1470 """
1471 1471
1472 1472 @abc.abstractmethod
1473 1473 def lookup(self, value):
1474 1474 """Attempt to resolve a value to a node.
1475 1475
1476 1476 Value can be a binary node, hex node, revision number, or a bytes
1477 1477 that can be converted to an integer.
1478 1478
1479 1479 Raises ``error.LookupError`` if a node could not be resolved.
1480 1480 """
1481 1481
1482 1482 @abc.abstractmethod
1483 1483 def parents(self, node):
1484 1484 """Returns a 2-tuple of parent nodes for a node.
1485 1485
1486 1486 Values will be ``nullid`` if the parent is empty.
1487 1487 """
1488 1488
1489 1489 @abc.abstractmethod
1490 1490 def parentrevs(self, rev):
1491 1491 """Like parents() but operates on revision numbers."""
1492 1492
1493 1493 @abc.abstractmethod
1494 1494 def linkrev(self, rev):
1495 1495 """Obtain the changeset revision number a revision is linked to."""
1496 1496
1497 1497 @abc.abstractmethod
1498 1498 def revision(self, node):
1499 1499 """Obtain fulltext data for a node."""
1500 1500
1501 1501 @abc.abstractmethod
1502 1502 def rawdata(self, node):
1503 1503 """Obtain raw data for a node."""
1504 1504
1505 1505 @abc.abstractmethod
1506 1506 def revdiff(self, rev1, rev2):
1507 1507 """Obtain a delta between two revision numbers.
1508 1508
1509 1509 The returned data is the result of ``bdiff.bdiff()`` on the raw
1510 1510 revision data.
1511 1511 """
1512 1512
1513 1513 @abc.abstractmethod
1514 1514 def cmp(self, node, fulltext):
1515 1515 """Compare fulltext to another revision.
1516 1516
1517 1517 Returns True if the fulltext is different from what is stored.
1518 1518 """
1519 1519
1520 1520 @abc.abstractmethod
1521 1521 def emitrevisions(
1522 1522 self,
1523 1523 nodes,
1524 1524 nodesorder=None,
1525 1525 revisiondata=False,
1526 1526 assumehaveparentrevisions=False,
1527 1527 ):
1528 1528 """Produce ``irevisiondelta`` describing revisions.
1529 1529
1530 1530 See the documentation for ``ifiledata`` for more.
1531 1531 """
1532 1532
1533 1533 @abc.abstractmethod
1534 1534 def addgroup(
1535 1535 self,
1536 1536 deltas,
1537 1537 linkmapper,
1538 1538 transaction,
1539 1539 addrevisioncb=None,
1540 1540 duplicaterevisioncb=None,
1541 1541 ):
1542 1542 """Process a series of deltas for storage.
1543 1543
1544 1544 See the documentation in ``ifilemutation`` for more.
1545 1545 """
1546 1546
1547 1547 @abc.abstractmethod
1548 1548 def rawsize(self, rev):
1549 1549 """Obtain the size of tracked data.
1550 1550
1551 1551 Is equivalent to ``len(m.rawdata(node))``.
1552 1552
1553 1553 TODO this method is only used by upgrade code and may be removed.
1554 1554 """
1555 1555
1556 1556 @abc.abstractmethod
1557 1557 def getstrippoint(self, minlink):
1558 1558 """Find minimum revision that must be stripped to strip a linkrev.
1559 1559
1560 1560 See the documentation in ``ifilemutation`` for more.
1561 1561 """
1562 1562
1563 1563 @abc.abstractmethod
1564 1564 def strip(self, minlink, transaction):
1565 1565 """Remove storage of items starting at a linkrev.
1566 1566
1567 1567 See the documentation in ``ifilemutation`` for more.
1568 1568 """
1569 1569
1570 1570 @abc.abstractmethod
1571 1571 def checksize(self):
1572 1572 """Obtain the expected sizes of backing files.
1573 1573
1574 1574 TODO this is used by verify and it should not be part of the interface.
1575 1575 """
1576 1576
1577 1577 @abc.abstractmethod
1578 1578 def files(self):
1579 1579 """Obtain paths that are backing storage for this manifest.
1580 1580
1581 1581 TODO this is used by verify and there should probably be a better API
1582 1582 for this functionality.
1583 1583 """
1584 1584
1585 1585 @abc.abstractmethod
1586 1586 def deltaparent(self, rev):
1587 1587 """Obtain the revision that a revision is delta'd against.
1588 1588
1589 1589 TODO delta encoding is an implementation detail of storage and should
1590 1590 not be exposed to the storage interface.
1591 1591 """
1592 1592
1593 1593 @abc.abstractmethod
1594 1594 def clone(self, tr, dest, **kwargs):
1595 1595 """Clone this instance to another."""
1596 1596
1597 1597 @abc.abstractmethod
1598 1598 def clearcaches(self, clear_persisted_data=False):
1599 1599 """Clear any caches associated with this instance."""
1600 1600
1601 1601 @abc.abstractmethod
1602 1602 def dirlog(self, d):
1603 1603 """Obtain a manifest storage instance for a tree."""
1604 1604
1605 1605 @abc.abstractmethod
1606 1606 def add(
1607 1607 self,
1608 1608 m,
1609 1609 transaction,
1610 1610 link,
1611 1611 p1,
1612 1612 p2,
1613 1613 added,
1614 1614 removed,
1615 1615 readtree=None,
1616 1616 match=None,
1617 1617 ):
1618 1618 """Add a revision to storage.
1619 1619
1620 1620 ``m`` is an object conforming to ``imanifestdict``.
1621 1621
1622 1622 ``link`` is the linkrev revision number.
1623 1623
1624 1624 ``p1`` and ``p2`` are the parent revision numbers.
1625 1625
1626 1626 ``added`` and ``removed`` are iterables of added and removed paths,
1627 1627 respectively.
1628 1628
1629 1629 ``readtree`` is a function that can be used to read the child tree(s)
1630 1630 when recursively writing the full tree structure using tree
1631 1631 manifests.
1632 1632
1633 1633 ``match`` is a matcher that can be used to hint to storage that not all
1634 1634 paths must be inspected; this is an optimization and can be safely
1635 1635 ignored. Note that the storage must still be able to reproduce a full
1636 1636 manifest including files that did not match.
1637 1637 """
1638 1638
1639 1639 @abc.abstractmethod
1640 1640 def storageinfo(
1641 1641 self,
1642 1642 exclusivefiles=False,
1643 1643 sharedfiles=False,
1644 1644 revisionscount=False,
1645 1645 trackedsize=False,
1646 1646 storedsize=False,
1647 1647 ):
1648 1648 """Obtain information about storage for this manifest's data.
1649 1649
1650 1650 See ``ifilestorage.storageinfo()`` for a description of this method.
1651 1651 This one behaves the same way, except for manifest data.
1652 1652 """
1653 1653
1654 1654 @abc.abstractmethod
1655 1655 def get_revlog(self):
1656 1656 """return an actual revlog instance if any
1657 1657
1658 1658 This exist because a lot of code leverage the fact the underlying
1659 1659 storage is a revlog for optimization, so giving simple way to access
1660 1660 the revlog instance helps such code.
1661 1661 """
1662 1662
1663 1663
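# Illustrative sketch (not part of the interface): one way a consumer might
# walk the manifest storage object described above. ``store`` is assumed to
# provide that interface; the helper name is made up for this example.
def _example_walk_manifest_storage(store):
    """Yield ``(rev, node, parents)`` for every stored manifest revision."""
    for rev in store:                  # __iter__() emits revision numbers
        node = store.node(rev)         # revision number -> binary node
        parents = store.parents(node)  # 2-tuple; ``nullid`` for absent parents
        yield rev, node, parents

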
1664 1664 class imanifestlog(Protocol):
1665 1665 """Interface representing a collection of manifest snapshots.
1666 1666
1667 1667 Represents the root manifest in a repository.
1668 1668
1669 1669 Also serves as a means to access nested tree manifests and to cache
1670 1670 tree manifests.
1671 1671 """
1672 1672
1673 1673 nodeconstants: NodeConstants
1674 1674 """nodeconstants used by the current repository."""
1675 1675
1676 1676 narrowed: bool
1677 1677 """True, is the manifest is narrowed by a matcher"""
1678 1678
1679 1679 @abc.abstractmethod
1680 1680 def __getitem__(self, node):
1681 1681 """Obtain a manifest instance for a given binary node.
1682 1682
1683 1683 Equivalent to calling ``self.get('', node)``.
1684 1684
1685 1685 The returned object conforms to the ``imanifestrevisionstored``
1686 1686 interface.
1687 1687 """
1688 1688
1689 1689 @abc.abstractmethod
1690 1690 def get(self, tree, node, verify=True):
1691 1691 """Retrieve the manifest instance for a given directory and binary node.
1692 1692
1693 1693 ``node`` always refers to the node of the root manifest (which will be
1694 1694 the only manifest if flat manifests are being used).
1695 1695
1696 1696 If ``tree`` is the empty string, the root manifest is returned.
1697 1697 Otherwise the manifest for the specified directory will be returned
1698 1698 (requires tree manifests).
1699 1699
1700 1700 If ``verify`` is True, ``LookupError`` is raised if the node is not
1701 1701 known.
1702 1702
1703 1703 The returned object conforms to the ``imanifestrevisionstored``
1704 1704 interface.
1705 1705 """
1706 1706
1707 1707 @abc.abstractmethod
1708 1708 def getstorage(self, tree):
1709 1709 """Retrieve an interface to storage for a particular tree.
1710 1710
1711 1711 If ``tree`` is the empty bytestring, storage for the root manifest will
1712 1712 be returned. Otherwise storage for a tree manifest is returned.
1713 1713
1714 1714 TODO formalize interface for returned object.
1715 1715 """
1716 1716
1717 1717 @abc.abstractmethod
1718 1718 def clearcaches(self, clear_persisted_data: bool = False) -> None:
1719 1719 """Clear caches associated with this collection."""
1720 1720
1721 1721 @abc.abstractmethod
1722 1722 def rev(self, node):
1723 1723 """Obtain the revision number for a binary node.
1724 1724
1725 1725 Raises ``error.LookupError`` if the node is not known.
1726 1726 """
1727 1727
1728 1728 @abc.abstractmethod
1729 1729 def update_caches(self, transaction):
1730 1730 """update whatever cache are relevant for the used storage."""
1731 1731
1732 1732
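# Illustrative sketch (not part of the interface): resolving manifests through
# an ``imanifestlog`` instance. ``repo`` and the node value are assumptions
# supplied by the caller; the helper name is made up for this example.
def _example_manifestlog_usage(repo, node):
    """Return the root manifest for ``node`` plus storage for the root tree."""
    mlog = repo.manifestlog
    rootctx = mlog[node]            # same as mlog.get(b'', node)
    storage = mlog.getstorage(b'')  # storage backing the root manifest
    rev = mlog.rev(node)            # raises error.LookupError if unknown
    return rootctx, storage, rev

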
1733 1733 class ilocalrepositoryfilestorage(Protocol):
1734 1734 """Local repository sub-interface providing access to tracked file storage.
1735 1735
1736 1736 This interface defines how a repository accesses storage for a single
1737 1737 tracked file path.
1738 1738 """
1739 1739
1740 1740 @abc.abstractmethod
1741 1741 def file(self, f):
1742 1742 """Obtain a filelog for a tracked path.
1743 1743
1744 1744 The returned type conforms to the ``ifilestorage`` interface.
1745 1745 """
1746 1746
1747 1747
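# Illustrative sketch (not part of the interface): obtaining per-file storage
# via ``ilocalrepositoryfilestorage.file()``. ``repo`` and the path are
# assumptions for this example, and it assumes the returned ``ifilestorage``
# object supports ``len()`` as the file index interface does.
def _example_filelog_revision_count(repo, path=b'README'):
    """Return how many revisions are stored for ``path``."""
    fl = repo.file(path)  # returned object conforms to ``ifilestorage``
    return len(fl)

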
1748 1748 class ilocalrepositorymain(Protocol):
1749 1749 """Main interface for local repositories.
1750 1750
1751 1751 This currently captures the reality of things - not how things should be.
1752 1752 """
1753 1753
1754 1754 nodeconstants: NodeConstants
1755 1755 """Constant nodes matching the hash function used by the repository."""
1756 1756
1757 1757 nullid: bytes
1758 1758 """null revision for the hash function used by the repository."""
1759 1759
1760 1760 supported: set[bytes]
1761 1761 """Set of requirements that this repo is capable of opening."""
1762 1762
1763 1763 requirements: set[bytes]
1764 1764 """Set of requirements this repo uses."""
1765 1765
1766 1766 features: set[bytes]
1767 1767 """Set of "features" this repository supports.
1768 1768
1769 1769 A "feature" is a loosely-defined term. It can refer to a feature
1770 1770 in the classical sense or can describe an implementation detail
1771 1771 of the repository. For example, a ``readonly`` feature may denote
1772 1772 the repository as read-only. Or a ``revlogfilestore`` feature may
1773 1773 denote that the repository is using revlogs for file storage.
1774 1774
1775 1775 The intent of features is to provide a machine-queryable mechanism
1776 1776 for repo consumers to test for various repository characteristics.
1777 1777
1778 1778 Features are similar to ``requirements``. The main difference is that
1779 1779 requirements are stored on-disk and represent requirements to open the
1780 1780 repository. Features describe run-time capabilities of the repository
1781 1781 and tend to be more granular (they may be derived from requirements).
1782 1782 """
1783 1783
1784 1784 filtername: bytes
1785 1785 """Name of the repoview that is active on this repo."""
1786 1786
1787 1787 vfs_map: Mapping[bytes, Vfs]
1788 1788 """a bytes-key β†’ vfs mapping used by transaction and others"""
1789 1789
1790 1790 wvfs: Vfs
1791 1791 """VFS used to access the working directory."""
1792 1792
1793 1793 vfs: Vfs
1794 1794 """VFS rooted at the .hg directory.
1795 1795
1796 1796 Used to access repository data not in the store.
1797 1797 """
1798 1798
1799 1799 svfs: Vfs
1800 1800 """VFS rooted at the store.
1801 1801
1802 1802 Used to access repository data in the store. Typically .hg/store.
1803 1803 But can point elsewhere if the store is shared.
1804 1804 """
1805 1805
1806 1806 root: bytes
1807 1807 """Path to the root of the working directory."""
1808 1808
1809 1809 path: bytes
1810 1810 """Path to the .hg directory."""
1811 1811
1812 1812 origroot: bytes
1813 1813 """The filesystem path that was used to construct the repo."""
1814 1814
1815 1815 auditor: Any
1816 1816 """A pathauditor for the working directory.
1817 1817
1818 1818 This checks if a path refers to a nested repository.
1819 1819
1820 1820 Operates on the filesystem.
1821 1821 """
1822 1822
1823 1823 nofsauditor: Any # TODO: add type hints
1824 1824 """A pathauditor for the working directory.
1825 1825
1826 1826 This is like ``auditor`` except it doesn't do filesystem checks.
1827 1827 """
1828 1828
1829 1829 baseui: Ui
1830 1830 """Original ui instance passed into constructor."""
1831 1831
1832 1832 ui: Ui
1833 1833 """Main ui instance for this instance."""
1834 1834
1835 1835 sharedpath: bytes
1836 1836 """Path to the .hg directory of the repo this repo was shared from."""
1837 1837
1838 1838 store: Any # TODO: add type hints
1839 1839 """A store instance."""
1840 1840
1841 1841 spath: bytes
1842 1842 """Path to the store."""
1843 1843
1844 1844 sjoin: Callable # TODO: add type hints
1845 1845 """Alias to self.store.join."""
1846 1846
1847 1847 cachevfs: Vfs
1848 1848 """A VFS used to access the cache directory.
1849 1849
1850 1850 Typically .hg/cache.
1851 1851 """
1852 1852
1853 1853 wcachevfs: Vfs
1854 1854 """A VFS used to access the cache directory dedicated to working copy
1855 1855
1856 1856 Typically .hg/wcache.
1857 1857 """
1858 1858
1859 1859 filteredrevcache: Any # TODO: add type hints
1860 1860 """Holds sets of revisions to be filtered."""
1861 1861
1862 1862 names: Any # TODO: add type hints
1863 1863 """A ``namespaces`` instance."""
1864 1864
1865 1865 filecopiesmode: Any # TODO: add type hints
1866 1866 """The way files copies should be dealt with in this repo."""
1867 1867
1868 1868 @abc.abstractmethod
1869 1869 def close(self):
1870 1870 """Close the handle on this repository."""
1871 1871
1872 1872 @abc.abstractmethod
1873 1873 def peer(self, path=None):
1874 1874 """Obtain an object conforming to the ``peer`` interface."""
1875 1875
1876 1876 @abc.abstractmethod
1877 1877 def unfiltered(self):
1878 1878 """Obtain an unfiltered/raw view of this repo."""
1879 1879
1880 1880 @abc.abstractmethod
1881 1881 def filtered(self, name, visibilityexceptions=None):
1882 1882 """Obtain a named view of this repository."""
1883 1883
1884 1884 obsstore: Any # TODO: add type hints
1885 1885 """A store of obsolescence data."""
1886 1886
1887 1887 changelog: Any # TODO: add type hints
1888 1888 """A handle on the changelog revlog."""
1889 1889
1890 1890 manifestlog: imanifestlog
1891 1891 """An instance conforming to the ``imanifestlog`` interface.
1892 1892
1893 1893 Provides access to manifests for the repository.
1894 1894 """
1895 1895
1896 1896 dirstate: intdirstate.idirstate
1897 1897 """Working directory state."""
1898 1898
1899 1899 narrowpats: Any # TODO: add type hints
1900 1900 """Matcher patterns for this repository's narrowspec."""
1901 1901
1902 1902 @abc.abstractmethod
1903 1903 def narrowmatch(self, match=None, includeexact=False):
1904 1904 """Obtain a matcher for the narrowspec."""
1905 1905
1906 1906 @abc.abstractmethod
1907 1907 def setnarrowpats(self, newincludes, newexcludes):
1908 1908 """Define the narrowspec for this repository."""
1909 1909
1910 1910 @abc.abstractmethod
1911 1911 def __getitem__(self, changeid):
1912 1912 """Try to resolve a changectx."""
1913 1913
1914 1914 @abc.abstractmethod
1915 1915 def __contains__(self, changeid):
1916 1916 """Whether a changeset exists."""
1917 1917
1918 1918 @abc.abstractmethod
1919 1919 def __nonzero__(self):
1920 1920 """Always returns True."""
1921 1921 return True
1922 1922
1923 1923 __bool__ = __nonzero__
1924 1924
1925 1925 @abc.abstractmethod
1926 1926 def __len__(self):
1927 1927 """Returns the number of changesets in the repo."""
1928 1928
1929 1929 @abc.abstractmethod
1930 1930 def __iter__(self):
1931 1931 """Iterate over revisions in the changelog."""
1932 1932
1933 1933 @abc.abstractmethod
1934 1934 def revs(self, expr, *args):
1935 1935 """Evaluate a revset.
1936 1936
1937 1937 Emits revisions.
1938 1938 """
1939 1939
1940 1940 @abc.abstractmethod
1941 1941 def set(self, expr, *args):
1942 1942 """Evaluate a revset.
1943 1943
1944 1944 Emits changectx instances.
1945 1945 """
1946 1946
1947 1947 @abc.abstractmethod
1948 1948 def anyrevs(self, specs, user=False, localalias=None):
1949 1949 """Find revisions matching one of the given revsets."""
1950 1950
1951 1951 @abc.abstractmethod
1952 1952 def url(self):
1953 1953 """Returns a string representing the location of this repo."""
1954 1954
1955 1955 @abc.abstractmethod
1956 1956 def hook(self, name, throw=False, **args):
1957 1957 """Call a hook."""
1958 1958
1959 1959 @abc.abstractmethod
1960 1960 def tags(self):
1961 1961 """Return a mapping of tag to node."""
1962 1962
1963 1963 @abc.abstractmethod
1964 1964 def tagtype(self, tagname):
1965 1965 """Return the type of a given tag."""
1966 1966
1967 1967 @abc.abstractmethod
1968 1968 def tagslist(self):
1969 1969 """Return a list of tags ordered by revision."""
1970 1970
1971 1971 @abc.abstractmethod
1972 1972 def nodetags(self, node):
1973 1973 """Return the tags associated with a node."""
1974 1974
1975 1975 @abc.abstractmethod
1976 1976 def nodebookmarks(self, node):
1977 1977 """Return the list of bookmarks pointing to the specified node."""
1978 1978
1979 1979 @abc.abstractmethod
1980 1980 def branchmap(self):
1981 1981 """Return a mapping of branch to heads in that branch."""
1982 1982
1983 1983 @abc.abstractmethod
1984 1984 def revbranchcache(self):
1985 1985 pass
1986 1986
1987 1987 @abc.abstractmethod
1988 1988 def register_changeset(self, rev, changelogrevision):
1989 1989 """Extension point for caches for new nodes.
1990 1990
1991 1991 Multiple consumers are expected to need parts of the changelogrevision,
1992 1992 so it is provided as optimization to avoid duplicate lookups. A simple
1993 1993 cache would be fragile when other revisions are accessed, too."""
1994 1994 pass
1995 1995
1996 1996 @abc.abstractmethod
1997 1997 def branchtip(self, branchtip, ignoremissing=False):
1998 1998 """Return the tip node for a given branch."""
1999 1999
2000 2000 @abc.abstractmethod
2001 2001 def lookup(self, key):
2002 2002 """Resolve the node for a revision."""
2003 2003
2004 2004 @abc.abstractmethod
2005 2005 def lookupbranch(self, key):
2006 2006 """Look up the branch name of the given revision or branch name."""
2007 2007
2008 2008 @abc.abstractmethod
2009 2009 def known(self, nodes):
2010 2010 """Determine whether a series of nodes is known.
2011 2011
2012 2012 Returns a list of bools.
2013 2013 """
2014 2014
2015 2015 @abc.abstractmethod
2016 2016 def local(self):
2017 2017 """Whether the repository is local."""
2018 2018 return True
2019 2019
2020 2020 @abc.abstractmethod
2021 2021 def publishing(self):
2022 2022 """Whether the repository is a publishing repository."""
2023 2023
2024 2024 @abc.abstractmethod
2025 2025 def cancopy(self):
2026 2026 pass
2027 2027
2028 2028 @abc.abstractmethod
2029 2029 def shared(self):
2030 2030 """The type of shared repository or None."""
2031 2031
2032 2032 @abc.abstractmethod
2033 2033 def wjoin(self, f, *insidef):
2034 2034 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
2035 2035
2036 2036 @abc.abstractmethod
2037 2037 def setparents(self, p1, p2):
2038 2038 """Set the parent nodes of the working directory."""
2039 2039
2040 2040 @abc.abstractmethod
2041 2041 def filectx(self, path, changeid=None, fileid=None):
2042 2042 """Obtain a filectx for the given file revision."""
2043 2043
2044 2044 @abc.abstractmethod
2045 2045 def getcwd(self):
2046 2046 """Obtain the current working directory from the dirstate."""
2047 2047
2048 2048 @abc.abstractmethod
2049 2049 def pathto(self, f, cwd=None):
2050 2050 """Obtain the relative path to a file."""
2051 2051
2052 2052 @abc.abstractmethod
2053 2053 def adddatafilter(self, name, fltr):
2054 2054 pass
2055 2055
2056 2056 @abc.abstractmethod
2057 2057 def wread(self, filename):
2058 2058 """Read a file from wvfs, using data filters."""
2059 2059
2060 2060 @abc.abstractmethod
2061 2061 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2062 2062 """Write data to a file in the wvfs, using data filters."""
2063 2063
2064 2064 @abc.abstractmethod
2065 2065 def wwritedata(self, filename, data):
2066 2066 """Resolve data for writing to the wvfs, using data filters."""
2067 2067
2068 2068 @abc.abstractmethod
2069 2069 def currenttransaction(self):
2070 2070 """Obtain the current transaction instance or None."""
2071 2071
2072 2072 @abc.abstractmethod
2073 2073 def transaction(self, desc, report=None):
2074 2074 """Open a new transaction to write to the repository."""
2075 2075
2076 2076 @abc.abstractmethod
2077 2077 def undofiles(self):
2078 2078 """Returns a list of (vfs, path) for files to undo transactions."""
2079 2079
2080 2080 @abc.abstractmethod
2081 2081 def recover(self):
2082 2082 """Roll back an interrupted transaction."""
2083 2083
2084 2084 @abc.abstractmethod
2085 2085 def rollback(self, dryrun=False, force=False):
2086 2086 """Undo the last transaction.
2087 2087
2088 2088 DANGEROUS.
2089 2089 """
2090 2090
2091 2091 @abc.abstractmethod
2092 2092 def updatecaches(self, tr=None, full=False, caches=None):
2093 2093 """Warm repo caches."""
2094 2094
2095 2095 @abc.abstractmethod
2096 2096 def invalidatecaches(self):
2097 2097 """Invalidate cached data due to the repository mutating."""
2098 2098
2099 2099 @abc.abstractmethod
2100 2100 def invalidatevolatilesets(self):
2101 2101 pass
2102 2102
2103 2103 @abc.abstractmethod
2104 2104 def invalidatedirstate(self):
2105 2105 """Invalidate the dirstate."""
2106 2106
2107 2107 @abc.abstractmethod
2108 2108 def invalidate(self, clearfilecache=False):
2109 2109 pass
2110 2110
2111 2111 @abc.abstractmethod
2112 2112 def invalidateall(self):
2113 2113 pass
2114 2114
2115 2115 @abc.abstractmethod
2116 2116 def lock(self, wait=True):
2117 2117 """Lock the repository store and return a lock instance."""
2118 2118
2119 2119 @abc.abstractmethod
2120 2120 def currentlock(self):
2121 2121 """Return the lock if it's held or None."""
2122 2122
2123 2123 @abc.abstractmethod
2124 2124 def wlock(self, wait=True):
2125 2125 """Lock the non-store parts of the repository."""
2126 2126
2127 2127 @abc.abstractmethod
2128 2128 def currentwlock(self):
2129 2129 """Return the wlock if it's held or None."""
2130 2130
2131 2131 @abc.abstractmethod
2132 2132 def checkcommitpatterns(self, wctx, match, status, fail):
2133 2133 pass
2134 2134
2135 2135 @abc.abstractmethod
2136 2136 def commit(
2137 2137 self,
2138 2138 text=b'',
2139 2139 user=None,
2140 2140 date=None,
2141 2141 match=None,
2142 2142 force=False,
2143 2143 editor=False,
2144 2144 extra=None,
2145 2145 ):
2146 2146 """Add a new revision to the repository."""
2147 2147
2148 2148 @abc.abstractmethod
2149 2149 def commitctx(self, ctx, error=False, origctx=None):
2150 2150 """Commit a commitctx instance to the repository."""
2151 2151
2152 2152 @abc.abstractmethod
2153 2153 def destroying(self):
2154 2154 """Inform the repository that nodes are about to be destroyed."""
2155 2155
2156 2156 @abc.abstractmethod
2157 2157 def destroyed(self):
2158 2158 """Inform the repository that nodes have been destroyed."""
2159 2159
2160 2160 @abc.abstractmethod
2161 2161 def status(
2162 2162 self,
2163 2163 node1=b'.',
2164 2164 node2=None,
2165 2165 match=None,
2166 2166 ignored=False,
2167 2167 clean=False,
2168 2168 unknown=False,
2169 2169 listsubrepos=False,
2170 2170 ):
2171 2171 """Convenience method to call repo[x].status()."""
2172 2172
2173 2173 @abc.abstractmethod
2174 2174 def addpostdsstatus(self, ps):
2175 2175 pass
2176 2176
2177 2177 @abc.abstractmethod
2178 2178 def postdsstatus(self):
2179 2179 pass
2180 2180
2181 2181 @abc.abstractmethod
2182 2182 def clearpostdsstatus(self):
2183 2183 pass
2184 2184
2185 2185 @abc.abstractmethod
2186 2186 def heads(self, start=None):
2187 2187 """Obtain list of nodes that are DAG heads."""
2188 2188
2189 2189 @abc.abstractmethod
2190 2190 def branchheads(self, branch=None, start=None, closed=False):
2191 2191 pass
2192 2192
2193 2193 @abc.abstractmethod
2194 2194 def branches(self, nodes):
2195 2195 pass
2196 2196
2197 2197 @abc.abstractmethod
2198 2198 def between(self, pairs):
2199 2199 pass
2200 2200
2201 2201 @abc.abstractmethod
2202 2202 def checkpush(self, pushop):
2203 2203 pass
2204 2204
2205 2205 prepushoutgoinghooks: util.hooks
2206 2206 """util.hooks instance."""
2207 2207
2208 2208 @abc.abstractmethod
2209 2209 def pushkey(self, namespace, key, old, new):
2210 2210 pass
2211 2211
2212 2212 @abc.abstractmethod
2213 2213 def listkeys(self, namespace):
2214 2214 pass
2215 2215
2216 2216 @abc.abstractmethod
2217 2217 def debugwireargs(self, one, two, three=None, four=None, five=None):
2218 2218 pass
2219 2219
2220 2220 @abc.abstractmethod
2221 2221 def savecommitmessage(self, text):
2222 2222 pass
2223 2223
2224 2224 @abc.abstractmethod
2225 2225 def register_sidedata_computer(
2226 2226 self, kind, category, keys, computer, flags, replace=False
2227 2227 ):
2228 2228 pass
2229 2229
2230 2230 @abc.abstractmethod
2231 2231 def register_wanted_sidedata(self, category):
2232 2232 pass
2233 2233
2234 2234
2235 2235 class completelocalrepository(
2236 ilocalrepositorymain, ilocalrepositoryfilestorage
2236 ilocalrepositorymain,
2237 ilocalrepositoryfilestorage,
2238 Protocol,
2237 2239 ):
2238 2240 """Complete interface for a local repository."""
2239 2241
2240 2242
2241 2243 class iwireprotocolcommandcacher(Protocol):
2242 2244 """Represents a caching backend for wire protocol commands.
2243 2245
2244 2246 Wire protocol version 2 supports transparent caching of many commands.
2245 2247 To leverage this caching, servers can activate objects that cache
2246 2248 command responses. Objects handle both cache writing and reading.
2247 2249 This interface defines how that response caching mechanism works.
2248 2250
2249 2251 Wire protocol version 2 commands emit a series of objects that are
2250 2252 serialized and sent to the client. The caching layer exists between
2251 2253 the invocation of the command function and the sending of its output
2252 2254 objects to an output layer.
2253 2255
2254 2256 Instances of this interface represent a binding to a cache that
2255 2257 can serve a response (in place of calling a command function) and/or
2256 2258 write responses to a cache for subsequent use.
2257 2259
2258 2260 When a command request arrives, the following happens with regards
2259 2261 to this interface:
2260 2262
2261 2263 1. The server determines whether the command request is cacheable.
2262 2264 2. If it is, an instance of this interface is spawned.
2263 2265 3. The cacher is activated in a context manager (``__enter__`` is called).
2264 2266 4. A cache *key* for that request is derived. This will call the
2265 2267 instance's ``adjustcachekeystate()`` method so the derivation
2266 2268 can be influenced.
2267 2269 5. The cacher is informed of the derived cache key via a call to
2268 2270 ``setcachekey()``.
2269 2271 6. The cacher's ``lookup()`` method is called to test for presence of
2270 2272 the derived key in the cache.
2271 2273 7. If ``lookup()`` returns a hit, that cached result is used in place
2272 2274 of invoking the command function. ``__exit__`` is called and the instance
2273 2275 is discarded.
2274 2276 8. The command function is invoked.
2275 2277 9. ``onobject()`` is called for each object emitted by the command
2276 2278 function.
2277 2279 10. After the final object is seen, ``onfinished()`` is called.
2278 2280 11. ``__exit__`` is called to signal the end of use of the instance.
2279 2281
2280 2282 Cache *key* derivation can be influenced by the instance.
2281 2283
2282 2284 Cache keys are initially derived by a deterministic representation of
2283 2285 the command request. This includes the command name, arguments, protocol
2284 2286 version, etc. This initial key derivation is performed by CBOR-encoding a
2285 2287 data structure and feeding that output into a hasher.
2286 2288
2287 2289 Instances of this interface can influence this initial key derivation
2288 2290 via ``adjustcachekeystate()``.
2289 2291
2290 2292 The instance is informed of the derived cache key via a call to
2291 2293 ``setcachekey()``. The instance must store the key locally so it can
2292 2294 be consulted on subsequent operations that may require it.
2293 2295
2294 2296 When constructed, the instance has access to a callable that can be used
2295 2297 for encoding response objects. This callable receives as its single
2296 2298 argument an object emitted by a command function. It returns an iterable
2297 2299 of bytes chunks representing the encoded object. Unless the cacher is
2298 2300 caching native Python objects in memory or has a way of reconstructing
2299 2301 the original Python objects, implementations typically call this function
2300 2302 to produce bytes from the output objects and then store those bytes in
2301 2303 the cache. When it comes time to re-emit those bytes, they are wrapped
2302 2304 in a ``wireprototypes.encodedresponse`` instance to tell the output
2303 2305 layer that they are pre-encoded.
2304 2306
2305 2307 When receiving the objects emitted by the command function, instances
2306 2308 can choose what to do with those objects. The simplest thing to do is
2307 2309 re-emit the original objects. They will be forwarded to the output
2308 2310 layer and will be processed as if the cacher did not exist.
2309 2311
2310 2312 Implementations could also choose to not emit objects - instead locally
2311 2313 buffering objects or their encoded representation. They could then emit
2312 2314 a single "coalesced" object when ``onfinished()`` is called. In
2313 2315 this way, the implementation would function as a filtering layer of
2314 2316 sorts.
2315 2317
2316 2318 When caching objects, typically the encoded form of the object will
2317 2319 be stored. Keep in mind that if the original object is forwarded to
2318 2320 the output layer, it will need to be encoded there as well. For large
2319 2321 output, this redundant encoding could add overhead. Implementations
2320 2322 could wrap the encoded object data in ``wireprototypes.encodedresponse``
2321 2323 instances to avoid this overhead.
2322 2324 """
2323 2325
2324 2326 @abc.abstractmethod
2325 2327 def __enter__(self):
2326 2328 """Marks the instance as active.
2327 2329
2328 2330 Should return self.
2329 2331 """
2330 2332
2331 2333 @abc.abstractmethod
2332 2334 def __exit__(self, exctype, excvalue, exctb):
2333 2335 """Called when cacher is no longer used.
2334 2336
2335 2337 This can be used by implementations to perform cleanup actions (e.g.
2336 2338 disconnecting network sockets, aborting a partially cached response).
2337 2339 """
2338 2340
2339 2341 @abc.abstractmethod
2340 2342 def adjustcachekeystate(self, state):
2341 2343 """Influences cache key derivation by adjusting state to derive key.
2342 2344
2343 2345 A dict defining the state used to derive the cache key is passed.
2344 2346
2345 2347 Implementations can modify this dict to record additional state that
2346 2348 is wanted to influence key derivation.
2347 2349
2348 2350 Implementations are *highly* encouraged to not modify or delete
2349 2351 existing keys.
2350 2352 """
2351 2353
2352 2354 @abc.abstractmethod
2353 2355 def setcachekey(self, key):
2354 2356 """Record the derived cache key for this request.
2355 2357
2356 2358 Instances may mutate the key for internal usage, as desired. e.g.
2357 2359 instances may wish to prepend the repo name, introduce path
2358 2360 components for filesystem or URL addressing, etc. Behavior is up to
2359 2361 the cache.
2360 2362
2361 2363 Returns a bool indicating if the request is cacheable by this
2362 2364 instance.
2363 2365 """
2364 2366
2365 2367 @abc.abstractmethod
2366 2368 def lookup(self):
2367 2369 """Attempt to resolve an entry in the cache.
2368 2370
2369 2371 The instance is instructed to look for the cache key that it was
2370 2372 informed about via the call to ``setcachekey()``.
2371 2373
2372 2374 If there's no cache hit or the cacher doesn't wish to use the cached
2373 2375 entry, ``None`` should be returned.
2374 2376
2375 2377 Else, a dict defining the cached result should be returned. The
2376 2378 dict may have the following keys:
2377 2379
2378 2380 objs
2379 2381 An iterable of objects that should be sent to the client. That
2380 2382 iterable of objects is expected to be what the command function
2381 2383 would return if invoked or an equivalent representation thereof.
2382 2384 """
2383 2385
2384 2386 @abc.abstractmethod
2385 2387 def onobject(self, obj):
2386 2388 """Called when a new object is emitted from the command function.
2387 2389
2388 2390 Receives as its argument the object that was emitted from the
2389 2391 command function.
2390 2392
2391 2393 This method returns an iterator of objects to forward to the output
2392 2394 layer. The easiest implementation is a generator that just
2393 2395 ``yield obj``.
2394 2396 """
2395 2397
2396 2398 @abc.abstractmethod
2397 2399 def onfinished(self):
2398 2400 """Called after all objects have been emitted from the command function.
2399 2401
2400 2402 Implementations should return an iterator of objects to forward to
2401 2403 the output layer.
2402 2404
2403 2405 This method can be a generator.
2404 2406 """