typing: add annotations to the `repository.peer` mixin class...
Matt Harbison
r53415:5a924cb0 default
@@ -1,2407 +1,2414 @@
1 1 # repository.py - Interfaces and base classes for repositories and peers.
2 2 # coding: utf-8
3 3 #
4 4 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import annotations
10 10
11 11 import abc
12 12 import typing
13 13
14 14 from typing import (
15 15 Any,
16 16 Callable,
17 17 Collection,
18 18 Iterable,
19 19 Iterator,
20 20 Mapping,
21 21 Protocol,
22 22 Set,
23 23 )
24 24
25 25 from ..i18n import _
26 26 from .. import error
27 27
28 28 if typing.TYPE_CHECKING:
29 29 from typing import (
30 30 ByteString, # TODO: change to Buffer for 3.14
31 31 )
32 32
33 33 # Almost all mercurial modules are only imported in the type checking phase
34 34 # to avoid circular imports
35 35 from .. import (
36 36 match as matchmod,
37 37 pathutil,
38 38 util,
39 39 )
40 40 from ..utils import (
41 41 urlutil,
42 42 )
43 43
44 44 from . import dirstate as intdirstate
45 45
46 46 # TODO: make a protocol class for this
47 47 NodeConstants = Any
48 48
49 49 # TODO: create a Protocol class, since importing uimod here causes a cycle
50 50 # that confuses pytype.
51 51 Ui = Any
52 52
53 53 # TODO: make a protocol class for this
54 54 Vfs = Any
55 55
56 56 # Local repository feature string.
57 57
58 58 # Revlogs are being used for file storage.
59 59 REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
60 60 # The storage part of the repository is shared from an external source.
61 61 REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
62 62 # LFS supported for backing file storage.
63 63 REPO_FEATURE_LFS = b'lfs'
64 64 # Repository supports being stream cloned.
65 65 REPO_FEATURE_STREAM_CLONE = b'streamclone'
66 66 # Repository supports (at least) some sidedata to be stored
67 67 REPO_FEATURE_SIDE_DATA = b'side-data'
68 68 # Files storage may lack data for all ancestors.
69 69 REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'
70 70
71 71 REVISION_FLAG_CENSORED = 1 << 15
72 72 REVISION_FLAG_ELLIPSIS = 1 << 14
73 73 REVISION_FLAG_EXTSTORED = 1 << 13
74 74 REVISION_FLAG_HASCOPIESINFO = 1 << 12
75 75
76 76 REVISION_FLAGS_KNOWN = (
77 77 REVISION_FLAG_CENSORED
78 78 | REVISION_FLAG_ELLIPSIS
79 79 | REVISION_FLAG_EXTSTORED
80 80 | REVISION_FLAG_HASCOPIESINFO
81 81 )
82 82
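# A minimal sketch (illustration only, not part of the original module): the
# flags field of a revision is a bitmask of the REVISION_FLAG_* constants, so
# bits outside REVISION_FLAGS_KNOWN can be detected by masking.
# _has_only_known_flags() is a hypothetical helper.
def _has_only_known_flags(flags: int) -> bool:
    """Return True if ``flags`` only contains known REVISION_FLAG_* bits."""
    return not (flags & ~REVISION_FLAGS_KNOWN)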
83 83 CG_DELTAMODE_STD = b'default'
84 84 CG_DELTAMODE_PREV = b'previous'
85 85 CG_DELTAMODE_FULL = b'fulltext'
86 86 CG_DELTAMODE_P1 = b'p1'
87 87
88 88
89 89 ## Cache related constants:
90 90 #
91 91 # Used to control which cache should be warmed in a repo.updatecaches(…) call.
92 92
93 93 # Warm branchmaps of all known repoview's filter-level
94 94 CACHE_BRANCHMAP_ALL = b"branchmap-all"
95 95 # Warm branchmaps of repoview's filter-level used by server
96 96 CACHE_BRANCHMAP_SERVED = b"branchmap-served"
97 97 # Warm internal changelog cache (eg: persistent nodemap)
98 98 CACHE_CHANGELOG_CACHE = b"changelog-cache"
99 99 # check if a branchmap can use the "pure topo" mode
100 100 CACHE_BRANCHMAP_DETECT_PURE_TOPO = b"branchmap-detect-pure-topo"
101 101 # Warm full manifest cache
102 102 CACHE_FULL_MANIFEST = b"full-manifest"
103 103 # Warm file-node-tags cache
104 104 CACHE_FILE_NODE_TAGS = b"file-node-tags"
105 105 # Warm internal manifestlog cache (eg: persistent nodemap)
106 106 CACHE_MANIFESTLOG_CACHE = b"manifestlog-cache"
108 108 # Warm rev branch cache
108 108 CACHE_REV_BRANCH = b"rev-branch-cache"
110 110 # Warm tags' cache for default repoview
110 110 CACHE_TAGS_DEFAULT = b"tags-default"
111 111 # Warm tags' cache for repoview's filter-level used by server
112 112 CACHE_TAGS_SERVED = b"tags-served"
113 113
114 114 # the caches to warm by default after a simple transaction
115 115 # (this is a mutable set to let extensions update it)
116 116 CACHES_DEFAULT = {
117 117 CACHE_BRANCHMAP_SERVED,
118 118 }
119 119
120 120 # the caches to warm when warming all of them
121 121 # (this is a mutable set to let extensions update it)
122 122 CACHES_ALL = {
123 123 CACHE_BRANCHMAP_SERVED,
124 124 CACHE_BRANCHMAP_ALL,
125 125 CACHE_BRANCHMAP_DETECT_PURE_TOPO,
126 126 CACHE_REV_BRANCH,
127 127 CACHE_CHANGELOG_CACHE,
128 128 CACHE_FILE_NODE_TAGS,
129 129 CACHE_FULL_MANIFEST,
130 130 CACHE_MANIFESTLOG_CACHE,
131 131 CACHE_TAGS_DEFAULT,
132 132 CACHE_TAGS_SERVED,
133 133 }
134 134
135 135 # the caches to warm by default after a clone
136 136 # (this is a mutable set to let extensions update it)
137 137 CACHES_POST_CLONE = CACHES_ALL.copy()
138 138 CACHES_POST_CLONE.discard(CACHE_FILE_NODE_TAGS)
139 139 CACHES_POST_CLONE.discard(CACHE_REV_BRANCH)
140 140
141 141
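# A minimal sketch (illustration only, not part of the original module): the
# CACHES_* values above are mutable sets so that extensions can register
# additional cache names to warm. CACHE_EXAMPLE_EXT and
# _register_example_cache() are hypothetical names.
CACHE_EXAMPLE_EXT = b"example-ext-cache"


def _register_example_cache() -> None:
    # warm the hypothetical cache after simple transactions and full warmings
    CACHES_DEFAULT.add(CACHE_EXAMPLE_EXT)
    CACHES_ALL.add(CACHE_EXAMPLE_EXT)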
142 142 class _ipeerconnection(Protocol):
143 143 """Represents a "connection" to a repository.
144 144
145 145 This is the base interface for representing a connection to a repository.
146 146 It holds basic properties and methods applicable to all peer types.
147 147
148 148 This is not a complete interface definition and should not be used
149 149 outside of this module.
150 150 """
151 151
152 152 ui: Ui
153 153 """ui.ui instance"""
154 154
155 155 path: urlutil.path | None
156 156 """a urlutil.path instance or None"""
157 157
158 158 @abc.abstractmethod
159 159 def url(self):
160 160 """Returns a URL string representing this peer.
161 161
162 162 Currently, implementations expose the raw URL used to construct the
163 163 instance. It may contain credentials as part of the URL. The
164 164 expectations of the value aren't well-defined and this could lead to
165 165 data leakage.
166 166
167 167 TODO audit/clean consumers and more clearly define the contents of this
168 168 value.
169 169 """
170 170
171 171 @abc.abstractmethod
172 172 def local(self):
173 173 """Returns a local repository instance.
174 174
175 175 If the peer represents a local repository, returns an object that
176 176 can be used to interface with it. Otherwise returns ``None``.
177 177 """
178 178
179 179 @abc.abstractmethod
180 180 def canpush(self):
181 181 """Returns a boolean indicating if this peer can be pushed to."""
182 182
183 183 @abc.abstractmethod
184 184 def close(self):
185 185 """Close the connection to this peer.
186 186
187 187 This is called when the peer will no longer be used. Resources
188 188 associated with the peer should be cleaned up.
189 189 """
190 190
191 191
192 192 class ipeercapabilities(Protocol):
193 193 """Peer sub-interface related to capabilities."""
194 194
195 195 @abc.abstractmethod
196 def capable(self, name):
196 def capable(self, name: bytes) -> bool | bytes:
197 197 """Determine support for a named capability.
198 198
199 199 Returns ``False`` if capability not supported.
200 200
201 201 Returns ``True`` if boolean capability is supported. Returns a string
202 202 if capability support is non-boolean.
203 203
204 204 Capability strings may or may not map to wire protocol capabilities.
205 205 """
206 206
207 207 @abc.abstractmethod
208 def requirecap(self, name, purpose):
208 def requirecap(self, name: bytes, purpose: bytes) -> None:
209 209 """Require a capability to be present.
210 210
211 211 Raises a ``CapabilityError`` if the capability isn't present.
212 212 """
213 213
214 214
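# A minimal sketch (illustration only, not part of the original module) of the
# tri-state contract described above: capable() returns False when absent,
# True for a boolean capability, and the bytes after ``name=`` otherwise.
# _bundle2_payload() is a hypothetical helper taking any ipeercapabilities.
def _bundle2_payload(remote) -> bytes | None:
    cap = remote.capable(b'bundle2')
    if cap is False:
        return None  # capability not advertised
    if cap is True:
        return b''  # boolean capability with no payload
    return cap  # non-boolean capability value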
215 215 class ipeercommands(Protocol):
216 216 """Client-side interface for communicating over the wire protocol.
217 217
218 218 This interface is used as a gateway to the Mercurial wire protocol.
219 219 Methods commonly call wire protocol commands of the same name.
220 220 """
221 221
222 222 @abc.abstractmethod
223 223 def branchmap(self):
224 224 """Obtain heads in named branches.
225 225
226 226 Returns a dict mapping branch name to an iterable of nodes that are
227 227 heads on that branch.
228 228 """
229 229
230 230 @abc.abstractmethod
231 231 def capabilities(self):
232 232 """Obtain capabilities of the peer.
233 233
234 234 Returns a set of string capabilities.
235 235 """
236 236
237 237 @abc.abstractmethod
238 238 def get_cached_bundle_inline(self, path):
239 239 """Retrieve a clonebundle across the wire.
240 240
241 241 Returns a chunkbuffer
242 242 """
243 243
244 244 @abc.abstractmethod
245 245 def clonebundles(self):
246 246 """Obtains the clone bundles manifest for the repo.
247 247
248 248 Returns the manifest as unparsed bytes.
249 249 """
250 250
251 251 @abc.abstractmethod
252 252 def debugwireargs(self, one, two, three=None, four=None, five=None):
253 253 """Used to facilitate debugging of arguments passed over the wire."""
254 254
255 255 @abc.abstractmethod
256 256 def getbundle(self, source, **kwargs):
257 257 """Obtain remote repository data as a bundle.
258 258
259 259 This command is how the bulk of repository data is transferred from
260 260 the peer to the local repository.
261 261
262 262 Returns a generator of bundle data.
263 263 """
264 264
265 265 @abc.abstractmethod
266 266 def heads(self):
267 267 """Determine all known head revisions in the peer.
268 268
269 269 Returns an iterable of binary nodes.
270 270 """
271 271
272 272 @abc.abstractmethod
273 273 def known(self, nodes):
274 274 """Determine whether multiple nodes are known.
275 275
276 276 Accepts an iterable of nodes whose presence to check for.
277 277
278 278 Returns an iterable of booleans indicating whether the corresponding node
279 279 at that index is known to the peer.
280 280 """
281 281
282 282 @abc.abstractmethod
283 283 def listkeys(self, namespace):
284 284 """Obtain all keys in a pushkey namespace.
285 285
286 286 Returns an iterable of key names.
287 287 """
288 288
289 289 @abc.abstractmethod
290 290 def lookup(self, key):
291 291 """Resolve a value to a known revision.
292 292
293 293 Returns a binary node of the resolved revision on success.
294 294 """
295 295
296 296 @abc.abstractmethod
297 297 def pushkey(self, namespace, key, old, new):
298 298 """Set a value using the ``pushkey`` protocol.
299 299
300 300 Arguments correspond to the pushkey namespace and key to operate on and
301 301 the old and new values for that key.
302 302
303 303 Returns a string with the peer result. The value inside varies by the
304 304 namespace.
305 305 """
306 306
307 307 @abc.abstractmethod
308 308 def stream_out(self):
309 309 """Obtain streaming clone data.
310 310
311 311 Successful result should be a generator of data chunks.
312 312 """
313 313
314 314 @abc.abstractmethod
315 315 def unbundle(self, bundle, heads, url):
316 316 """Transfer repository data to the peer.
317 317
318 318 This is how the bulk of data during a push is transferred.
319 319
320 320 Returns the integer number of heads added to the peer.
321 321 """
322 322
323 323
324 324 class ipeerlegacycommands(Protocol):
325 325 """Interface for implementing support for legacy wire protocol commands.
326 326
327 327 Wire protocol commands transition to legacy status when they are no longer
328 328 used by modern clients. To facilitate identifying which commands are
329 329 legacy, the interfaces are split.
330 330 """
331 331
332 332 @abc.abstractmethod
333 333 def between(self, pairs):
334 334 """Obtain nodes between pairs of nodes.
335 335
336 336 ``pairs`` is an iterable of node pairs.
337 337
338 338 Returns an iterable of iterables of nodes corresponding to each
339 339 requested pair.
340 340 """
341 341
342 342 @abc.abstractmethod
343 343 def branches(self, nodes):
344 344 """Obtain ancestor changesets of specific nodes back to a branch point.
345 345
346 346 For each requested node, the peer finds the first ancestor node that is
347 347 a DAG root or is a merge.
348 348
349 349 Returns an iterable of iterables with the resolved values for each node.
350 350 """
351 351
352 352 @abc.abstractmethod
353 353 def changegroup(self, nodes, source):
354 354 """Obtain a changegroup with data for descendants of specified nodes."""
355 355
356 356 @abc.abstractmethod
357 357 def changegroupsubset(self, bases, heads, source):
358 358 pass
359 359
360 360
361 361 class ipeercommandexecutor(Protocol):
362 362 """Represents a mechanism to execute remote commands.
363 363
364 364 This is the primary interface for requesting that wire protocol commands
365 365 be executed. Instances of this interface are active in a context manager
366 366 and have a well-defined lifetime. When the context manager exits, all
367 367 outstanding requests are waited on.
368 368 """
369 369
370 370 @abc.abstractmethod
371 371 def callcommand(self, name, args):
372 372 """Request that a named command be executed.
373 373
374 374 Receives the command name and a dictionary of command arguments.
375 375
376 376 Returns a ``concurrent.futures.Future`` that will resolve to the
377 377 result of that command request. That exact value is left up to
378 378 the implementation and possibly varies by command.
379 379
380 380 Not all commands can coexist with other commands in an executor
381 381 instance: it depends on the underlying wire protocol transport being
382 382 used and the command itself.
383 383
384 384 Implementations MAY call ``sendcommands()`` automatically if the
385 385 requested command can not coexist with other commands in this executor.
386 386
387 387 Implementations MAY call ``sendcommands()`` automatically when the
388 388 future's ``result()`` is called. So, consumers using multiple
389 389 commands with an executor MUST ensure that ``result()`` is not called
390 390 until all command requests have been issued.
391 391 """
392 392
393 393 @abc.abstractmethod
394 394 def sendcommands(self):
395 395 """Trigger submission of queued command requests.
396 396
397 397 Not all transports submit commands as soon as they are requested to
398 398 run. When called, this method forces queued command requests to be
399 399 issued. It will no-op if all commands have already been sent.
400 400
401 401 When called, no more new commands may be issued with this executor.
402 402 """
403 403
404 404 @abc.abstractmethod
405 405 def close(self):
406 406 """Signal that this command request is finished.
407 407
408 408 When called, no more new commands may be issued. All outstanding
409 409 commands that have previously been issued are waited on before
410 410 returning. This not only includes waiting for the futures to resolve,
411 411 but also waiting for all response data to arrive. In other words,
412 412 calling this waits for all on-wire state for issued command requests
413 413 to finish.
414 414
415 415 When used as a context manager, this method is called when exiting the
416 416 context manager.
417 417
418 418 This method may call ``sendcommands()`` if there are buffered commands.
419 419 """
420 420
421 421
422 422 class ipeerrequests(Protocol):
423 423 """Interface for executing commands on a peer."""
424 424
425 425 limitedarguments: bool
426 426 """True if the peer cannot receive large argument value for commands."""
427 427
428 428 @abc.abstractmethod
429 429 def commandexecutor(self):
430 430 """A context manager that resolves to an ipeercommandexecutor.
431 431
432 432 The object this resolves to can be used to issue command requests
433 433 to the peer.
434 434
435 435 Callers should call its ``callcommand`` method to issue command
436 436 requests.
437 437
438 438 A new executor should be obtained for each distinct set of commands
439 439 (possibly just a single command) that the consumer wants to execute
440 440 as part of a single operation or round trip. This is because some
441 441 peers are half-duplex and/or don't support persistent connections.
442 442 e.g. in the case of HTTP peers, commands sent to an executor represent
443 443 a single HTTP request. While some peers may support multiple command
444 444 sends over the wire per executor, consumers need to code to the least
445 445 capable peer. So it should be assumed that command executors buffer
446 446 called commands until they are told to send them and that each
447 447 command executor could result in a new connection or wire-level request
448 448 being issued.
449 449 """
450 450
451 451
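# A minimal sketch (illustration only, not part of the original module) of the
# usage pattern documented above: one executor per round trip, issuing every
# callcommand() before reading any result(). _heads_and_branchmap() is a
# hypothetical helper taking any object implementing ipeerrequests.
def _heads_and_branchmap(remote):
    with remote.commandexecutor() as e:
        f_heads = e.callcommand(b'heads', {})
        f_branchmap = e.callcommand(b'branchmap', {})
        # results are only read after all commands for this round trip
        # have been issued
        return f_heads.result(), f_branchmap.result()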
452 452 # TODO: make this a Protocol class when 3.11 is the minimum supported version?
453 453 class peer(_ipeerconnection, ipeercapabilities, ipeerrequests):
454 454 """Unified interface for peer repositories.
455 455
456 456 All peer instances must conform to this interface.
457 457 """
458 458
459 459 limitedarguments: bool = False
460
461 def __init__(self, ui, path=None, remotehidden=False):
460 path: urlutil.path | None
461 ui: Ui
462
463 def __init__(
464 self,
465 ui: Ui,
466 path: urlutil.path | None = None,
467 remotehidden: bool = False,
468 ) -> None:
462 469 self.ui = ui
463 470 self.path = path
464 471
465 def capable(self, name):
472 def capable(self, name: bytes) -> bool | bytes:
466 473 # TODO: this class should maybe subclass ipeercommands too, otherwise it
467 474 # is assuming whatever uses this as a mixin also has this interface.
468 475 caps = self.capabilities() # pytype: disable=attribute-error
469 476 if name in caps:
470 477 return True
471 478
472 479 name = b'%s=' % name
473 480 for cap in caps:
474 481 if cap.startswith(name):
475 482 return cap[len(name) :]
476 483
477 484 return False
478 485
479 def requirecap(self, name, purpose):
486 def requirecap(self, name: bytes, purpose: bytes) -> None:
480 487 if self.capable(name):
481 488 return
482 489
483 490 raise error.CapabilityError(
484 491 _(
485 492 b'cannot %s; remote repository does not support the '
486 493 b'\'%s\' capability'
487 494 )
488 495 % (purpose, name)
489 496 )
490 497
491 498
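# A minimal sketch (illustration only, not part of the original module) of a
# concrete class built on the annotated mixin above: it inherits the typed
# ui/path attributes and the capable()/requirecap() implementations, and fills
# in the abstract connection methods. _dummypeer and its capability set are
# hypothetical.
class _dummypeer(peer):
    def url(self):
        return b'dummy://'

    def local(self):
        return None

    def canpush(self):
        return False

    def close(self):
        pass

    def capabilities(self):
        # consulted by the inherited capable()/requirecap()
        return {b'getbundle', b'bundle2=HG20'}

    def commandexecutor(self):
        raise NotImplementedError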
492 499 class iverifyproblem(Protocol):
493 500 """Represents a problem with the integrity of the repository.
494 501
495 502 Instances of this interface are emitted to describe an integrity issue
496 503 with a repository (e.g. corrupt storage, missing data, etc).
497 504
498 505 Instances are essentially messages associated with severity.
499 506 """
500 507
501 508 warning: bytes | None
502 509 """Message indicating a non-fatal problem."""
503 510
504 511 error: bytes | None
505 512 """Message indicating a fatal problem."""
506 513
507 514 node: bytes | None
508 515 """Revision encountering the problem.
509 516
510 517 ``None`` means the problem doesn't apply to a single revision.
511 518 """
512 519
513 520
514 521 class irevisiondelta(Protocol):
515 522 """Represents a delta between one revision and another.
516 523
517 524 Instances convey enough information to allow a revision to be exchanged
518 525 with another repository.
519 526
520 527 Instances represent the fulltext revision data or a delta against
521 528 another revision. Therefore the ``revision`` and ``delta`` attributes
522 529 are mutually exclusive.
523 530
524 531 Typically used for changegroup generation.
525 532 """
526 533
527 534 node: bytes
528 535 """20 byte node of this revision."""
529 536
530 537 p1node: bytes
531 538 """20 byte node of 1st parent of this revision."""
532 539
533 540 p2node: bytes
534 541 """20 byte node of 2nd parent of this revision."""
535 542
536 543 # TODO: is this really optional? revlog.revlogrevisiondelta defaults to None
537 544 linknode: bytes | None
538 545 """20 byte node of the changelog revision this node is linked to."""
539 546
540 547 flags: int
541 548 """2 bytes of integer flags that apply to this revision.
542 549
543 550 This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
544 551 """
545 552
546 553 basenode: bytes
547 554 """20 byte node of the revision this data is a delta against.
548 555
549 556 ``nullid`` indicates that the revision is a full revision and not
550 557 a delta.
551 558 """
552 559
553 560 baserevisionsize: int | None
554 561 """Size of base revision this delta is against.
555 562
556 563 May be ``None`` if ``basenode`` is ``nullid``.
557 564 """
558 565
559 566 # TODO: is this really optional? (Seems possible in
560 567 # storageutil.emitrevisions()).
561 568 revision: bytes | None
562 569 """Raw fulltext of revision data for this node."""
563 570
564 571 delta: bytes | None
565 572 """Delta between ``basenode`` and ``node``.
566 573
567 574 Stored in the bdiff delta format.
568 575 """
569 576
570 577 sidedata: bytes | None
571 578 """Raw sidedata bytes for the given revision."""
572 579
573 580 protocol_flags: int
574 581 """Single byte of integer flags that can influence the protocol.
575 582
576 583 This is a bitwise composition of the ``storageutil.CG_FLAG*`` constants.
577 584 """
578 585
579 586
580 587 class ifilerevisionssequence(Protocol):
581 588 """Contains index data for all revisions of a file.
582 589
583 590 Types implementing this behave like lists of tuples. The index
584 591 in the list corresponds to the revision number. The values contain
585 592 index metadata.
586 593
587 594 The *null* revision (revision number -1) is always the last item
588 595 in the index.
589 596 """
590 597
591 598 @abc.abstractmethod
592 599 def __len__(self):
593 600 """The total number of revisions."""
594 601
595 602 @abc.abstractmethod
596 603 def __getitem__(self, rev):
597 604 """Returns the object having a specific revision number.
598 605
599 606 Returns an 8-tuple with the following fields:
600 607
601 608 offset+flags
602 609 Contains the offset and flags for the revision. 64-bit unsigned
603 610 integer where first 6 bytes are the offset and the next 2 bytes
604 611 are flags. The offset can be 0 if it is not used by the store.
605 612 compressed size
606 613 Size of the revision data in the store. It can be 0 if it isn't
607 614 needed by the store.
608 615 uncompressed size
609 616 Fulltext size. It can be 0 if it isn't needed by the store.
610 617 base revision
611 618 Revision number of revision the delta for storage is encoded
612 619 against. -1 indicates not encoded against a base revision.
613 620 link revision
614 621 Revision number of changelog revision this entry is related to.
615 622 p1 revision
616 623 Revision number of 1st parent. -1 if no 1st parent.
617 624 p2 revision
618 625 Revision number of 2nd parent. -1 if no 2nd parent.
619 626 node
620 627 Binary node value for this revision number.
621 628
622 629 Negative values should index off the end of the sequence. ``-1``
623 630 should return the null revision. ``-2`` should return the most
624 631 recent revision.
625 632 """
626 633
627 634 @abc.abstractmethod
628 635 def __contains__(self, rev):
629 636 """Whether a revision number exists."""
630 637
631 638 @abc.abstractmethod
632 639 def insert(self, i, entry):
633 640 """Add an item to the index at specific revision."""
634 641
635 642
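# A minimal sketch (illustration only, not part of the original module):
# entries of an ifilerevisionssequence are the 8-tuples described above.
# _entry_summary() is a hypothetical helper that unpacks one entry.
def _entry_summary(index, rev):
    (offset_flags, comp_size, uncomp_size, base_rev,
     link_rev, p1_rev, p2_rev, node) = index[rev]
    return {b'node': node, b'linkrev': link_rev, b'parents': (p1_rev, p2_rev)}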
636 643 class ifileindex(Protocol):
637 644 """Storage interface for index data of a single file.
638 645
639 646 File storage data is divided into index metadata and data storage.
640 647 This interface defines the index portion of the interface.
641 648
642 649 The index logically consists of:
643 650
644 651 * A mapping between revision numbers and nodes.
645 652 * DAG data (storing and querying the relationship between nodes).
646 653 * Metadata to facilitate storage.
647 654 """
648 655
649 656 nullid: bytes
650 657 """node for the null revision for use as delta base."""
651 658
652 659 @abc.abstractmethod
653 660 def __len__(self) -> int:
654 661 """Obtain the number of revisions stored for this file."""
655 662
656 663 @abc.abstractmethod
657 664 def __iter__(self) -> Iterator[int]:
658 665 """Iterate over revision numbers for this file."""
659 666
660 667 @abc.abstractmethod
661 668 def hasnode(self, node):
662 669 """Returns a bool indicating if a node is known to this store.
663 670
664 671 Implementations must only return True for full, binary node values:
665 672 hex nodes, revision numbers, and partial node matches must be
666 673 rejected.
667 674
668 675 The null node is never present.
669 676 """
670 677
671 678 @abc.abstractmethod
672 679 def revs(self, start=0, stop=None):
673 680 """Iterate over revision numbers for this file, with control."""
674 681
675 682 @abc.abstractmethod
676 683 def parents(self, node):
677 684 """Returns a 2-tuple of parent nodes for a revision.
678 685
679 686 Values will be ``nullid`` if the parent is empty.
680 687 """
681 688
682 689 @abc.abstractmethod
683 690 def parentrevs(self, rev):
684 691 """Like parents() but operates on revision numbers."""
685 692
686 693 @abc.abstractmethod
687 694 def rev(self, node):
688 695 """Obtain the revision number given a node.
689 696
690 697 Raises ``error.LookupError`` if the node is not known.
691 698 """
692 699
693 700 @abc.abstractmethod
694 701 def node(self, rev):
695 702 """Obtain the node value given a revision number.
696 703
697 704 Raises ``IndexError`` if the node is not known.
698 705 """
699 706
700 707 @abc.abstractmethod
701 708 def lookup(self, node):
702 709 """Attempt to resolve a value to a node.
703 710
704 711 Value can be a binary node, hex node, revision number, or a string
705 712 that can be converted to an integer.
706 713
707 714 Raises ``error.LookupError`` if a node could not be resolved.
708 715 """
709 716
710 717 @abc.abstractmethod
711 718 def linkrev(self, rev):
712 719 """Obtain the changeset revision number a revision is linked to."""
713 720
714 721 @abc.abstractmethod
715 722 def iscensored(self, rev):
716 723 """Return whether a revision's content has been censored."""
717 724
718 725 @abc.abstractmethod
719 726 def commonancestorsheads(self, node1, node2):
720 727 """Obtain an iterable of nodes containing heads of common ancestors.
721 728
722 729 See ``ancestor.commonancestorsheads()``.
723 730 """
724 731
725 732 @abc.abstractmethod
726 733 def descendants(self, revs):
727 734 """Obtain descendant revision numbers for a set of revision numbers.
728 735
729 736 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
730 737 """
731 738
732 739 @abc.abstractmethod
733 740 def heads(self, start=None, stop=None):
734 741 """Obtain a list of nodes that are DAG heads, with control.
735 742
736 743 The set of revisions examined can be limited by specifying
737 744 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
738 745 iterable of nodes. DAG traversal starts at earlier revision
739 746 ``start`` and iterates forward until any node in ``stop`` is
740 747 encountered.
741 748 """
742 749
743 750 @abc.abstractmethod
744 751 def children(self, node):
745 752 """Obtain nodes that are children of a node.
746 753
747 754 Returns a list of nodes.
748 755 """
749 756
750 757
751 758 class ifiledata(Protocol):
752 759 """Storage interface for data storage of a specific file.
753 760
754 761 This complements ``ifileindex`` and provides an interface for accessing
755 762 data for a tracked file.
756 763 """
757 764
758 765 @abc.abstractmethod
759 766 def size(self, rev):
760 767 """Obtain the fulltext size of file data.
761 768
762 769 Any metadata is excluded from size measurements.
763 770 """
764 771
765 772 @abc.abstractmethod
766 773 def revision(self, node):
767 774 """Obtain fulltext data for a node.
768 775
769 776 By default, any storage transformations are applied before the data
770 777 is returned. If ``raw`` is True, non-raw storage transformations
771 778 are not applied.
772 779
773 780 The fulltext data may contain a header containing metadata. Most
774 781 consumers should use ``read()`` to obtain the actual file data.
775 782 """
776 783
777 784 @abc.abstractmethod
778 785 def rawdata(self, node):
779 786 """Obtain raw data for a node."""
780 787
781 788 @abc.abstractmethod
782 789 def read(self, node):
783 790 """Resolve file fulltext data.
784 791
785 792 This is similar to ``revision()`` except any metadata in the data
786 793 headers is stripped.
787 794 """
788 795
789 796 @abc.abstractmethod
790 797 def renamed(self, node):
791 798 """Obtain copy metadata for a node.
792 799
793 800 Returns ``False`` if no copy metadata is stored or a 2-tuple of
794 801 (path, node) from which this revision was copied.
795 802 """
796 803
797 804 @abc.abstractmethod
798 805 def cmp(self, node, fulltext):
799 806 """Compare fulltext to another revision.
800 807
801 808 Returns True if the fulltext is different from what is stored.
802 809
803 810 This takes copy metadata into account.
804 811
805 812 TODO better document the copy metadata and censoring logic.
806 813 """
807 814
808 815 @abc.abstractmethod
809 816 def emitrevisions(
810 817 self,
811 818 nodes,
812 819 nodesorder=None,
813 820 revisiondata=False,
814 821 assumehaveparentrevisions=False,
815 822 deltamode=CG_DELTAMODE_STD,
816 823 ):
817 824 """Produce ``irevisiondelta`` for revisions.
818 825
819 826 Given an iterable of nodes, emits objects conforming to the
820 827 ``irevisiondelta`` interface that describe revisions in storage.
821 828
822 829 This method is a generator.
823 830
824 831 The input nodes may be unordered. Implementations must ensure that a
825 832 node's parents are emitted before the node itself. Transitively, this
826 833 means that a node may only be emitted once all its ancestors in
827 834 ``nodes`` have also been emitted.
828 835
829 836 By default, emits "index" data (the ``node``, ``p1node``, and
830 837 ``p2node`` attributes). If ``revisiondata`` is set, revision data
831 838 will also be present on the emitted objects.
832 839
833 840 With default argument values, implementations can choose to emit
834 841 either fulltext revision data or a delta. When emitting deltas,
835 842 implementations must consider whether the delta's base revision
836 843 fulltext is available to the receiver.
837 844
838 845 The base revision fulltext is guaranteed to be available if any of
839 846 the following are met:
840 847
841 848 * Its fulltext revision was emitted by this method call.
842 849 * A delta for that revision was emitted by this method call.
843 850 * ``assumehaveparentrevisions`` is True and the base revision is a
844 851 parent of the node.
845 852
846 853 ``nodesorder`` can be used to control the order that revisions are
847 854 emitted. By default, revisions can be reordered as long as they are
848 855 in DAG topological order (see above). If the value is ``nodes``,
849 856 the iteration order from ``nodes`` should be used. If the value is
850 857 ``storage``, then the native order from the backing storage layer
851 858 is used. (Not all storage layers will have strong ordering and behavior
852 859 of this mode is storage-dependent.) ``nodes`` ordering can force
853 860 revisions to be emitted before their ancestors, so consumers should
854 861 use it with care.
855 862
856 863 The ``linknode`` attribute on the returned ``irevisiondelta`` may not
857 864 be set and it is the caller's responsibility to resolve it, if needed.
858 865
859 866 If ``deltamode`` is CG_DELTAMODE_PREV and revision data is requested,
860 867 all revision data should be emitted as deltas against the revision
861 868 emitted just prior. The initial revision should be a delta against its
862 869 1st parent.
863 870 """
864 871
865 872
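# A minimal sketch (illustration only, not part of the original module) of a
# consumer of emitrevisions(): ``revision`` and ``delta`` are mutually
# exclusive on each emitted irevisiondelta. _collect_revision_data() is a
# hypothetical helper.
def _collect_revision_data(store, nodes):
    out = []
    for rev in store.emitrevisions(nodes, revisiondata=True):
        if rev.revision is not None:
            out.append((rev.node, None, rev.revision))
        else:
            out.append((rev.node, rev.basenode, rev.delta))
    return out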
866 873 class ifilemutation(Protocol):
867 874 """Storage interface for mutation events of a tracked file."""
868 875
869 876 @abc.abstractmethod
870 877 def add(self, filedata, meta, transaction, linkrev, p1, p2):
871 878 """Add a new revision to the store.
872 879
873 880 Takes file data, dictionary of metadata, a transaction, linkrev,
874 881 and parent nodes.
875 882
876 883 Returns the node that was added.
877 884
878 885 May no-op if a revision matching the supplied data is already stored.
879 886 """
880 887
881 888 @abc.abstractmethod
882 889 def addrevision(
883 890 self,
884 891 revisiondata,
885 892 transaction,
886 893 linkrev,
887 894 p1,
888 895 p2,
889 896 node=None,
890 897 flags=0,
891 898 cachedelta=None,
892 899 ):
893 900 """Add a new revision to the store and return its number.
894 901
895 902 This is similar to ``add()`` except it operates at a lower level.
896 903
897 904 The data passed in already contains a metadata header, if any.
898 905
899 906 ``node`` and ``flags`` can be used to define the expected node and
900 907 the flags to use with storage. ``flags`` is a bitwise value composed
901 908 of the various ``REVISION_FLAG_*`` constants.
902 909
903 910 ``add()`` is usually called when adding files from e.g. the working
904 911 directory. ``addrevision()`` is often called by ``add()`` and for
905 912 scenarios where revision data has already been computed, such as when
906 913 applying raw data from a peer repo.
907 914 """
908 915
909 916 @abc.abstractmethod
910 917 def addgroup(
911 918 self,
912 919 deltas,
913 920 linkmapper,
914 921 transaction,
915 922 addrevisioncb=None,
916 923 duplicaterevisioncb=None,
917 924 maybemissingparents=False,
918 925 ):
919 926 """Process a series of deltas for storage.
920 927
921 928 ``deltas`` is an iterable of 7-tuples of
922 929 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
923 930 to add.
924 931
925 932 The ``delta`` field contains ``mpatch`` data to apply to a base
926 933 revision, identified by ``deltabase``. The base node can be
927 934 ``nullid``, in which case the header from the delta can be ignored
928 935 and the delta used as the fulltext.
929 936
930 937 ``alwayscache`` instructs the lower layers to cache the content of the
931 938 newly added revision, even if it needs to be explicitly computed.
932 939 This used to be the default when ``addrevisioncb`` was provided up to
933 940 Mercurial 5.8.
934 941
935 942 ``addrevisioncb`` should be called for each new rev as it is committed.
936 943 ``duplicaterevisioncb`` should be called for all revs with a
937 944 pre-existing node.
938 945
939 946 ``maybemissingparents`` is a bool indicating whether the incoming
940 947 data may reference parents/ancestor revisions that aren't present.
941 948 This flag is set when receiving data into a "shallow" store that
942 949 doesn't hold all history.
943 950
944 951 Returns a list of nodes that were processed. A node will be in the list
945 952 even if it existed in the store previously.
946 953 """
947 954
948 955 @abc.abstractmethod
949 956 def censorrevision(self, tr, node, tombstone=b''):
950 957 """Remove the content of a single revision.
951 958
952 959 The specified ``node`` will have its content purged from storage.
953 960 Future attempts to access the revision data for this node will
954 961 result in failure.
955 962
956 963 A ``tombstone`` message can optionally be stored. This message may be
957 964 displayed to users when they attempt to access the missing revision
958 965 data.
959 966
960 967 Storage backends may have stored deltas against the previous content
961 968 in this revision. As part of censoring a revision, these storage
962 969 backends are expected to rewrite any internally stored deltas such
963 970 that they no longer reference the deleted content.
964 971 """
965 972
966 973 @abc.abstractmethod
967 974 def getstrippoint(self, minlink):
968 975 """Find the minimum revision that must be stripped to strip a linkrev.
969 976
970 977 Returns a 2-tuple containing the minimum revision number and a set
971 978 of all revisions numbers that would be broken by this strip.
972 979
973 980 TODO this is highly revlog centric and should be abstracted into
974 981 a higher-level deletion API. ``repair.strip()`` relies on this.
975 982 """
976 983
977 984 @abc.abstractmethod
978 985 def strip(self, minlink, transaction):
979 986 """Remove storage of items starting at a linkrev.
980 987
981 988 This uses ``getstrippoint()`` to determine the first node to remove.
982 989 Then it effectively truncates storage for all revisions after that.
983 990
984 991 TODO this is highly revlog centric and should be abstracted into a
985 992 higher-level deletion API.
986 993 """
987 994
988 995
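# A minimal sketch (illustration only, not part of the original module) of a
# caller of addgroup(): ``deltas`` yields the 7-tuples described above and the
# changelog's rev() serves as the linkmapper turning linknodes into linkrevs.
# _apply_deltas() is a hypothetical helper.
def _apply_deltas(store, deltas, changelog, tr):
    added = []

    def onadd(rev):
        # called for each newly committed revision
        added.append(rev)

    store.addgroup(deltas, changelog.rev, tr, addrevisioncb=onadd)
    return added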
989 996 class ifilestorage(ifileindex, ifiledata, ifilemutation, Protocol):
990 997 """Complete storage interface for a single tracked file."""
991 998
992 999 @abc.abstractmethod
993 1000 def files(self):
994 1001 """Obtain paths that are backing storage for this file.
995 1002
996 1003 TODO this is used heavily by verify code and there should probably
997 1004 be a better API for that.
998 1005 """
999 1006
1000 1007 @abc.abstractmethod
1001 1008 def storageinfo(
1002 1009 self,
1003 1010 exclusivefiles=False,
1004 1011 sharedfiles=False,
1005 1012 revisionscount=False,
1006 1013 trackedsize=False,
1007 1014 storedsize=False,
1008 1015 ):
1009 1016 """Obtain information about storage for this file's data.
1010 1017
1011 1018 Returns a dict describing storage for this tracked path. The keys
1012 1019 in the dict map to arguments of the same name. The arguments are bools
1013 1020 indicating whether to calculate and obtain that data.
1014 1021
1015 1022 exclusivefiles
1016 1023 Iterable of (vfs, path) describing files that are exclusively
1017 1024 used to back storage for this tracked path.
1018 1025
1019 1026 sharedfiles
1020 1027 Iterable of (vfs, path) describing files that are used to back
1021 1028 storage for this tracked path. Those files may also provide storage
1022 1029 for other stored entities.
1023 1030
1024 1031 revisionscount
1025 1032 Number of revisions available for retrieval.
1026 1033
1027 1034 trackedsize
1028 1035 Total size in bytes of all tracked revisions. This is a sum of the
1029 1036 length of the fulltext of all revisions.
1030 1037
1031 1038 storedsize
1032 1039 Total size in bytes used to store data for all tracked revisions.
1033 1040 This is commonly less than ``trackedsize`` due to internal usage
1034 1041 of deltas rather than fulltext revisions.
1035 1042
1036 1043 Not all storage backends may support all queries or have a reasonable
1037 1044 value to use. In that case, the value should be set to ``None`` and
1038 1045 callers are expected to handle this special value.
1039 1046 """
1040 1047
1041 1048 @abc.abstractmethod
1042 1049 def verifyintegrity(self, state) -> Iterable[iverifyproblem]:
1043 1050 """Verifies the integrity of file storage.
1044 1051
1045 1052 ``state`` is a dict holding state of the verifier process. It can be
1046 1053 used to communicate data between invocations of multiple storage
1047 1054 primitives.
1048 1055
1049 1056 If individual revisions cannot have their revision content resolved,
1050 1057 the method is expected to set the ``skipread`` key to a set of nodes
1051 1058 that encountered problems. If set, the method can also add the node(s)
1052 1059 to ``safe_renamed`` in order to indicate nodes that may perform the
1053 1060 rename checks with currently accessible data.
1054 1061
1055 1062 The method yields objects conforming to the ``iverifyproblem``
1056 1063 interface.
1057 1064 """
1058 1065
1059 1066
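# A minimal sketch (illustration only, not part of the original module):
# measurements from storageinfo() are opt-in via booleans, and
# verifyintegrity() yields iverifyproblem objects. _file_store_report() and
# the contents of the ``state`` dict are hypothetical and backend-dependent.
def _file_store_report(fstore):
    info = fstore.storageinfo(revisionscount=True, trackedsize=True)
    state = {b'skipread': set(), b'safe_renamed': set()}
    problems = [p.error or p.warning for p in fstore.verifyintegrity(state)]
    return info, problems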
1060 1067 class idirs(Protocol):
1061 1068 """Interface representing a collection of directories from paths.
1062 1069
1063 1070 This interface is essentially a derived data structure representing
1064 1071 directories from a collection of paths.
1065 1072 """
1066 1073
1067 1074 @abc.abstractmethod
1068 1075 def addpath(self, path):
1069 1076 """Add a path to the collection.
1070 1077
1071 1078 All directories in the path will be added to the collection.
1072 1079 """
1073 1080
1074 1081 @abc.abstractmethod
1075 1082 def delpath(self, path):
1076 1083 """Remove a path from the collection.
1077 1084
1078 1085 If the removal was the last path in a particular directory, the
1079 1086 directory is removed from the collection.
1080 1087 """
1081 1088
1082 1089 @abc.abstractmethod
1083 1090 def __iter__(self):
1084 1091 """Iterate over the directories in this collection of paths."""
1085 1092
1086 1093 @abc.abstractmethod
1087 1094 def __contains__(self, path):
1088 1095 """Whether a specific directory is in this collection."""
1089 1096
1090 1097
1091 1098 class imanifestdict(Protocol):
1092 1099 """Interface representing a manifest data structure.
1093 1100
1094 1101 A manifest is effectively a dict mapping paths to entries. Each entry
1095 1102 consists of a binary node and extra flags affecting that entry.
1096 1103 """
1097 1104
1098 1105 @abc.abstractmethod
1099 1106 def __getitem__(self, key: bytes) -> bytes:
1100 1107 """Returns the binary node value for a path in the manifest.
1101 1108
1102 1109 Raises ``KeyError`` if the path does not exist in the manifest.
1103 1110
1104 1111 Equivalent to ``self.find(path)[0]``.
1105 1112 """
1106 1113
1107 1114 @abc.abstractmethod
1108 1115 def find(self, path: bytes) -> tuple[bytes, bytes]:
1109 1116 """Returns the entry for a path in the manifest.
1110 1117
1111 1118 Returns a 2-tuple of (node, flags).
1112 1119
1113 1120 Raises ``KeyError`` if the path does not exist in the manifest.
1114 1121 """
1115 1122
1116 1123 @abc.abstractmethod
1117 1124 def __len__(self) -> int:
1118 1125 """Return the number of entries in the manifest."""
1119 1126
1120 1127 @abc.abstractmethod
1121 1128 def __nonzero__(self) -> bool:
1122 1129 """Returns True if the manifest has entries, False otherwise."""
1123 1130
1124 1131 __bool__ = __nonzero__
1125 1132
1126 1133 @abc.abstractmethod
1127 1134 def set(self, path: bytes, node: bytes, flags: bytes) -> None:
1128 1135 """Define the node value and flags for a path in the manifest.
1129 1136
1130 1137 Equivalent to __setitem__ followed by setflag, but can be more efficient.
1131 1138 """
1132 1139
1133 1140 @abc.abstractmethod
1134 1141 def __setitem__(self, path: bytes, node: bytes) -> None:
1135 1142 """Define the node value for a path in the manifest.
1136 1143
1137 1144 If the path is already in the manifest, its flags will be copied to
1138 1145 the new entry.
1139 1146 """
1140 1147
1141 1148 @abc.abstractmethod
1142 1149 def __contains__(self, path: bytes) -> bool:
1143 1150 """Whether a path exists in the manifest."""
1144 1151
1145 1152 @abc.abstractmethod
1146 1153 def __delitem__(self, path: bytes) -> None:
1147 1154 """Remove a path from the manifest.
1148 1155
1149 1156 Raises ``KeyError`` if the path is not in the manifest.
1150 1157 """
1151 1158
1152 1159 @abc.abstractmethod
1153 1160 def __iter__(self) -> Iterator[bytes]:
1154 1161 """Iterate over paths in the manifest."""
1155 1162
1156 1163 @abc.abstractmethod
1157 1164 def iterkeys(self) -> Iterator[bytes]:
1158 1165 """Iterate over paths in the manifest."""
1159 1166
1160 1167 @abc.abstractmethod
1161 1168 def keys(self) -> list[bytes]:
1162 1169 """Obtain a list of paths in the manifest."""
1163 1170
1164 1171 @abc.abstractmethod
1165 1172 def filesnotin(self, other, match=None) -> Set[bytes]:
1166 1173 """Obtain the set of paths in this manifest but not in another.
1167 1174
1168 1175 ``match`` is an optional matcher function to be applied to both
1169 1176 manifests.
1170 1177
1171 1178 Returns a set of paths.
1172 1179 """
1173 1180
1174 1181 @abc.abstractmethod
1175 1182 def dirs(self) -> pathutil.dirs:
1176 1183 """Returns an object implementing the ``idirs`` interface."""
1177 1184
1178 1185 @abc.abstractmethod
1179 1186 def hasdir(self, dir: bytes) -> bool:
1180 1187 """Returns a bool indicating if a directory is in this manifest."""
1181 1188
1182 1189 @abc.abstractmethod
1183 1190 def walk(self, match: matchmod.basematcher) -> Iterator[bytes]:
1184 1191 """Generator of paths in manifest satisfying a matcher.
1185 1192
1186 1193 If the matcher has explicit files listed and they don't exist in
1187 1194 the manifest, ``match.bad()`` is called for each missing file.
1188 1195 """
1189 1196
1190 1197 @abc.abstractmethod
1191 1198 def diff(
1192 1199 self,
1193 1200 other: Any, # TODO: 'manifestdict' or (better) equivalent interface
1194 1201 match: matchmod.basematcher | None = None,
1195 1202 clean: bool = False,
1196 1203 ) -> dict[
1197 1204 bytes,
1198 1205 tuple[tuple[bytes | None, bytes], tuple[bytes | None, bytes]] | None,
1199 1206 ]:
1200 1207 """Find differences between this manifest and another.
1201 1208
1202 1209 This manifest is compared to ``other``.
1203 1210
1204 1211 If ``match`` is provided, the two manifests are filtered against this
1205 1212 matcher and only entries satisfying the matcher are compared.
1206 1213
1207 1214 If ``clean`` is True, unchanged files are included in the returned
1208 1215 object.
1209 1216
1210 1217 Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
1211 1218 the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
1212 1219 represents the node and flags for this manifest and ``(node2, flag2)``
1213 1220 are the same for the other manifest.
1214 1221 """
1215 1222
1216 1223 @abc.abstractmethod
1217 1224 def setflag(self, path: bytes, flag: bytes) -> None:
1218 1225 """Set the flag value for a given path.
1219 1226
1220 1227 Raises ``KeyError`` if the path is not already in the manifest.
1221 1228 """
1222 1229
1223 1230 @abc.abstractmethod
1224 1231 def get(self, path: bytes, default=None) -> bytes | None:
1225 1232 """Obtain the node value for a path or a default value if missing."""
1226 1233
1227 1234 @abc.abstractmethod
1228 1235 def flags(self, path: bytes) -> bytes:
1229 1236 """Return the flags value for a path (default: empty bytestring)."""
1230 1237
1231 1238 @abc.abstractmethod
1232 1239 def copy(self) -> 'imanifestdict':
1233 1240 """Return a copy of this manifest."""
1234 1241
1235 1242 @abc.abstractmethod
1236 1243 def items(self) -> Iterator[tuple[bytes, bytes]]:
1237 1244 """Returns an iterable of (path, node) for items in this manifest."""
1238 1245
1239 1246 @abc.abstractmethod
1240 1247 def iteritems(self) -> Iterator[tuple[bytes, bytes]]:
1241 1248 """Identical to items()."""
1242 1249
1243 1250 @abc.abstractmethod
1244 1251 def iterentries(self) -> Iterator[tuple[bytes, bytes, bytes]]:
1245 1252 """Returns an iterable of (path, node, flags) for this manifest.
1246 1253
1247 1254 Similar to ``iteritems()`` except items are a 3-tuple and include
1248 1255 flags.
1249 1256 """
1250 1257
1251 1258 @abc.abstractmethod
1252 1259 def text(self) -> ByteString:
1253 1260 """Obtain the raw data representation for this manifest.
1254 1261
1255 1262 Result is used to create a manifest revision.
1256 1263 """
1257 1264
1258 1265 @abc.abstractmethod
1259 1266 def fastdelta(
1260 1267 self, base: ByteString, changes: Iterable[tuple[bytes, bool]]
1261 1268 ) -> tuple[ByteString, ByteString]:
1262 1269 """Obtain a delta between this manifest and another given changes.
1263 1270
1264 1271 ``base`` is the raw data representation for another manifest.
1265 1272
1266 1273 ``changes`` is an iterable of ``(path, to_delete)``.
1267 1274
1268 1275 Returns a 2-tuple containing ``bytearray(self.text())`` and the
1269 1276 delta between ``base`` and this manifest.
1270 1277
1271 1278 If this manifest implementation can't support ``fastdelta()``,
1272 1279 raise ``mercurial.manifest.FastdeltaUnavailable``.
1273 1280 """
1274 1281
1275 1282
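# A minimal sketch (illustration only, not part of the original module) of
# walking the dict returned by diff(): values are ((node1, flag1),
# (node2, flag2)) with node1 belonging to this manifest and node2 to the
# other. _changed_paths() is a hypothetical helper.
def _changed_paths(m1, m2):
    added, removed, modified = [], [], []
    for path, ((node1, flag1), (node2, flag2)) in m1.diff(m2).items():
        if node1 is None:
            added.append(path)  # only present in m2
        elif node2 is None:
            removed.append(path)  # only present in m1
        else:
            modified.append(path)
    return added, removed, modified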
1276 1283 class imanifestrevisionbase(Protocol):
1277 1284 """Base interface representing a single revision of a manifest.
1278 1285
1279 1286 Should not be used as a primary interface: should always be inherited
1280 1287 as part of a larger interface.
1281 1288 """
1282 1289
1283 1290 @abc.abstractmethod
1284 1291 def copy(self):
1285 1292 """Obtain a copy of this manifest instance.
1286 1293
1287 1294 Returns an object conforming to the ``imanifestrevisionwritable``
1288 1295 interface. The instance will be associated with the same
1289 1296 ``imanifestlog`` collection as this instance.
1290 1297 """
1291 1298
1292 1299 @abc.abstractmethod
1293 1300 def read(self):
1294 1301 """Obtain the parsed manifest data structure.
1295 1302
1296 1303 The returned object conforms to the ``imanifestdict`` interface.
1297 1304 """
1298 1305
1299 1306
1300 1307 class imanifestrevisionstored(imanifestrevisionbase, Protocol):
1301 1308 """Interface representing a manifest revision committed to storage."""
1302 1309
1303 1310 @abc.abstractmethod
1304 1311 def node(self) -> bytes:
1305 1312 """The binary node for this manifest."""
1306 1313
1307 1314 parents: list[bytes]
1308 1315 """List of binary nodes that are parents for this manifest revision."""
1309 1316
1310 1317 @abc.abstractmethod
1311 1318 def readdelta(self, shallow: bool = False):
1312 1319 """Obtain the manifest data structure representing changes from parent.
1313 1320
1314 1321 This manifest is compared to its 1st parent. A new manifest
1315 1322 representing those differences is constructed.
1316 1323
1317 1324 If `shallow` is True, this will read the delta for this directory,
1318 1325 without recursively reading subdirectory manifests. Instead, any
1319 1326 subdirectory entry will be reported as it appears in the manifest, i.e.
1320 1327 the subdirectory will be reported among files and distinguished only by
1321 1328 its 't' flag. This only applies if the underlying manifest supports it.
1322 1329
1323 1330 The returned object conforms to the ``imanifestdict`` interface.
1324 1331 """
1325 1332
1326 1333 @abc.abstractmethod
1327 1334 def read_any_fast_delta(
1328 1335 self,
1329 1336 valid_bases: Collection[int] | None = None,
1330 1337 *,
1331 1338 shallow: bool = False,
1332 1339 ):
1333 1340 """read some manifest information as fast if possible
1334 1341
1335 1342 This might return a "delta", a manifest object containing only the
1336 1343 files changed compared to another revision. The `valid_bases` argument
1337 1344 controls the set of revisions that might be used as a base.
1338 1345
1339 1346 If no delta can be retrieved quickly, a full read of the manifest will
1340 1347 be performed instead.
1341 1348
1342 1349 The function returns a tuple with two elements. The first one is the
1343 1350 delta base used (or None if we did a full read), the second one is the
1344 1351 manifest information.
1345 1352
1346 1353 If `shallow` is True, this will read the delta for this directory,
1347 1354 without recursively reading subdirectory manifests. Instead, any
1348 1355 subdirectory entry will be reported as it appears in the manifest, i.e.
1349 1356 the subdirectory will be reported among files and distinguished only by
1350 1357 its 't' flag. This only applies if the underlying manifest supports it.
1351 1358
1352 1359 The returned object conforms to the ``imanifestdict`` interface.
1353 1360 """
1354 1361
1355 1362 @abc.abstractmethod
1356 1363 def read_delta_parents(self, *, shallow: bool = False, exact: bool = True):
1357 1364 """return a diff from this revision against both parents.
1358 1365
1359 1366 If `exact` is False, this might return a superset of the diff, containing
1360 1367 files that are actually present as is in one of the parents.
1361 1368
1362 1369 If `shallow` is True, this will read the delta for this directory,
1363 1370 without recursively reading subdirectory manifests. Instead, any
1364 1371 subdirectory entry will be reported as it appears in the manifest, i.e.
1365 1372 the subdirectory will be reported among files and distinguished only by
1366 1373 its 't' flag. This only applies if the underlying manifest supports it.
1367 1374
1368 1375 The returned object conforms to the ``imanifestdict`` interface."""
1369 1376
1370 1377 @abc.abstractmethod
1371 1378 def read_delta_new_entries(self, *, shallow: bool = False):
1372 1379 """Return a manifest containing just the entries that might be new to
1373 1380 the repository.
1374 1381
1375 1382 This is often equivalent to a diff against both parents, but this is not
1376 1383 guaranteed. For performance reasons, it might contain more files in some cases.
1377 1384
1378 1385 If `shallow` is True, this will read the delta for this directory,
1379 1386 without recursively reading subdirectory manifests. Instead, any
1380 1387 subdirectory entry will be reported as it appears in the manifest, i.e.
1381 1388 the subdirectory will be reported among files and distinguished only by
1382 1389 its 't' flag. This only applies if the underlying manifest supports it.
1383 1390
1384 1391 The returned object conforms to the ``imanifestdict`` interface."""
1385 1392
1386 1393 @abc.abstractmethod
1387 1394 def readfast(self, shallow: bool = False):
1388 1395 """Calls either ``read()`` or ``readdelta()``.
1389 1396
1390 1397 The faster of the two options is called.
1391 1398 """
1392 1399
1393 1400 @abc.abstractmethod
1394 1401 def find(self, key: bytes) -> tuple[bytes, bytes]:
1395 1402 """Calls ``self.read().find(key)``.
1396 1403
1397 1404 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
1398 1405 """
1399 1406
1400 1407
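# A minimal sketch (illustration only, not part of the original module):
# read_any_fast_delta() returns a (delta_base, manifest) pair, where a None
# base means a full read was performed. _fast_entries() is a hypothetical
# helper.
def _fast_entries(stored_manifest):
    base, m = stored_manifest.read_any_fast_delta()
    kind = b'full' if base is None else b'delta'
    return kind, list(m.iterentries())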
1401 1408 class imanifestrevisionwritable(imanifestrevisionbase, Protocol):
1402 1409 """Interface representing a manifest revision that can be committed."""
1403 1410
1404 1411 @abc.abstractmethod
1405 1412 def write(
1406 1413 self, transaction, linkrev, p1node, p2node, added, removed, match=None
1407 1414 ):
1408 1415 """Add this revision to storage.
1409 1416
1410 1417 Takes a transaction object, the changeset revision number it will
1411 1418 be associated with, its parent nodes, and lists of added and
1412 1419 removed paths.
1413 1420
1414 1421 If match is provided, storage can choose not to inspect or write out
1415 1422 items that do not match. Storage is still required to be able to provide
1416 1423 the full manifest in the future for any directories written (these
1417 1424 manifests should not be "narrowed on disk").
1418 1425
1419 1426 Returns the binary node of the created revision.
1420 1427 """
1421 1428
1422 1429
1423 1430 class imanifeststorage(Protocol):
1424 1431 """Storage interface for manifest data."""
1425 1432
1426 1433 nodeconstants: NodeConstants
1427 1434 """nodeconstants used by the current repository."""
1428 1435
1429 1436 tree: bytes
1430 1437 """The path to the directory this manifest tracks.
1431 1438
1432 1439 The empty bytestring represents the root manifest.
1433 1440 """
1434 1441
1435 1442 index: ifilerevisionssequence
1436 1443 """An ``ifilerevisionssequence`` instance."""
1437 1444
1438 1445 opener: Vfs
1439 1446 """VFS opener to use to access underlying files used for storage.
1440 1447
1441 1448 TODO this is revlog specific and should not be exposed.
1442 1449 """
1443 1450
1444 1451 # TODO: finish type hints
1445 1452 fulltextcache: dict
1446 1453 """Dict with cache of fulltexts.
1447 1454
1448 1455 TODO this doesn't feel appropriate for the storage interface.
1449 1456 """
1450 1457
1451 1458 @abc.abstractmethod
1452 1459 def __len__(self):
1453 1460 """Obtain the number of revisions stored for this manifest."""
1454 1461
1455 1462 @abc.abstractmethod
1456 1463 def __iter__(self):
1457 1464 """Iterate over revision numbers for this manifest."""
1458 1465
1459 1466 @abc.abstractmethod
1460 1467 def rev(self, node):
1461 1468 """Obtain the revision number given a binary node.
1462 1469
1463 1470 Raises ``error.LookupError`` if the node is not known.
1464 1471 """
1465 1472
1466 1473 @abc.abstractmethod
1467 1474 def node(self, rev):
1468 1475 """Obtain the node value given a revision number.
1469 1476
1470 1477 Raises ``error.LookupError`` if the revision is not known.
1471 1478 """
1472 1479
1473 1480 @abc.abstractmethod
1474 1481 def lookup(self, value):
1475 1482 """Attempt to resolve a value to a node.
1476 1483
1477 1484 Value can be a binary node, hex node, revision number, or a bytes
1478 1485 that can be converted to an integer.
1479 1486
1480 1487 Raises ``error.LookupError`` if a node could not be resolved.
1481 1488 """
1482 1489
1483 1490 @abc.abstractmethod
1484 1491 def parents(self, node):
1485 1492 """Returns a 2-tuple of parent nodes for a node.
1486 1493
1487 1494 Values will be ``nullid`` if the parent is empty.
1488 1495 """
1489 1496
1490 1497 @abc.abstractmethod
1491 1498 def parentrevs(self, rev):
1492 1499 """Like parents() but operates on revision numbers."""
1493 1500
1494 1501 @abc.abstractmethod
1495 1502 def linkrev(self, rev):
1496 1503 """Obtain the changeset revision number a revision is linked to."""
1497 1504
1498 1505 @abc.abstractmethod
1499 1506 def revision(self, node):
1500 1507 """Obtain fulltext data for a node."""
1501 1508
1502 1509 @abc.abstractmethod
1503 1510 def rawdata(self, node):
1504 1511 """Obtain raw data for a node."""
1505 1512
1506 1513 @abc.abstractmethod
1507 1514 def revdiff(self, rev1, rev2):
1508 1515 """Obtain a delta between two revision numbers.
1509 1516
1510 1517 The returned data is the result of ``bdiff.bdiff()`` on the raw
1511 1518 revision data.
1512 1519 """
1513 1520
1514 1521 @abc.abstractmethod
1515 1522 def cmp(self, node, fulltext):
1516 1523 """Compare fulltext to another revision.
1517 1524
1518 1525 Returns True if the fulltext is different from what is stored.
1519 1526 """
1520 1527
1521 1528 @abc.abstractmethod
1522 1529 def emitrevisions(
1523 1530 self,
1524 1531 nodes,
1525 1532 nodesorder=None,
1526 1533 revisiondata=False,
1527 1534 assumehaveparentrevisions=False,
1528 1535 ):
1529 1536 """Produce ``irevisiondelta`` describing revisions.
1530 1537
1531 1538 See the documentation for ``ifiledata`` for more.
1532 1539 """
1533 1540
1534 1541 @abc.abstractmethod
1535 1542 def addgroup(
1536 1543 self,
1537 1544 deltas,
1538 1545 linkmapper,
1539 1546 transaction,
1540 1547 addrevisioncb=None,
1541 1548 duplicaterevisioncb=None,
1542 1549 ):
1543 1550 """Process a series of deltas for storage.
1544 1551
1545 1552 See the documentation in ``ifilemutation`` for more.
1546 1553 """
1547 1554
1548 1555 @abc.abstractmethod
1549 1556 def rawsize(self, rev):
1550 1557 """Obtain the size of tracked data.
1551 1558
1552 1559 Is equivalent to ``len(m.rawdata(node))``.
1553 1560
1554 1561 TODO this method is only used by upgrade code and may be removed.
1555 1562 """
1556 1563
1557 1564 @abc.abstractmethod
1558 1565 def getstrippoint(self, minlink):
1559 1566 """Find minimum revision that must be stripped to strip a linkrev.
1560 1567
1561 1568 See the documentation in ``ifilemutation`` for more.
1562 1569 """
1563 1570
1564 1571 @abc.abstractmethod
1565 1572 def strip(self, minlink, transaction):
1566 1573 """Remove storage of items starting at a linkrev.
1567 1574
1568 1575 See the documentation in ``ifilemutation`` for more.
1569 1576 """
1570 1577
1571 1578 @abc.abstractmethod
1572 1579 def checksize(self):
1573 1580 """Obtain the expected sizes of backing files.
1574 1581
1575 1582 TODO this is used by verify and it should not be part of the interface.
1576 1583 """
1577 1584
1578 1585 @abc.abstractmethod
1579 1586 def files(self):
1580 1587 """Obtain paths that are backing storage for this manifest.
1581 1588
1582 1589 TODO this is used by verify and there should probably be a better API
1583 1590 for this functionality.
1584 1591 """
1585 1592
1586 1593 @abc.abstractmethod
1587 1594 def deltaparent(self, rev):
1588 1595 """Obtain the revision that a revision is delta'd against.
1589 1596
1590 1597 TODO delta encoding is an implementation detail of storage and should
1591 1598 not be exposed to the storage interface.
1592 1599 """
1593 1600
1594 1601 @abc.abstractmethod
1595 1602 def clone(self, tr, dest, **kwargs):
1596 1603 """Clone this instance to another."""
1597 1604
1598 1605 @abc.abstractmethod
1599 1606 def clearcaches(self, clear_persisted_data=False):
1600 1607 """Clear any caches associated with this instance."""
1601 1608
1602 1609 @abc.abstractmethod
1603 1610 def dirlog(self, d):
1604 1611 """Obtain a manifest storage instance for a tree."""
1605 1612
1606 1613 @abc.abstractmethod
1607 1614 def add(
1608 1615 self,
1609 1616 m,
1610 1617 transaction,
1611 1618 link,
1612 1619 p1,
1613 1620 p2,
1614 1621 added,
1615 1622 removed,
1616 1623 readtree=None,
1617 1624 match=None,
1618 1625 ):
1619 1626 """Add a revision to storage.
1620 1627
1621 1628 ``m`` is an object conforming to ``imanifestdict``.
1622 1629
1623 1630 ``link`` is the linkrev revision number.
1624 1631
1625 1632 ``p1`` and ``p2`` are the parent revision numbers.
1626 1633
1627 1634 ``added`` and ``removed`` are iterables of added and removed paths,
1628 1635 respectively.
1629 1636
1630 1637 ``readtree`` is a function that can be used to read the child tree(s)
1631 1638 when recursively writing the full tree structure with tree
1632 1639 manifests.
1633 1640
1634 1641 ``match`` is a matcher that can be used to hint to storage that not all
1635 1642 paths must be inspected; this is an optimization and can be safely
1636 1643 ignored. Note that the storage must still be able to reproduce a full
1637 1644 manifest including files that did not match.
1638 1645 """
1639 1646
1640 1647 @abc.abstractmethod
1641 1648 def storageinfo(
1642 1649 self,
1643 1650 exclusivefiles=False,
1644 1651 sharedfiles=False,
1645 1652 revisionscount=False,
1646 1653 trackedsize=False,
1647 1654 storedsize=False,
1648 1655 ):
1649 1656 """Obtain information about storage for this manifest's data.
1650 1657
1651 1658 See ``ifilestorage.storageinfo()`` for a description of this method.
1652 1659 This one behaves the same way, except for manifest data.
1653 1660 """
1654 1661
1655 1662 @abc.abstractmethod
1656 1663 def get_revlog(self):
1657 1664 """return an actual revlog instance if any
1658 1665
1659 1666 This exist because a lot of code leverage the fact the underlying
1660 1667 storage is a revlog for optimization, so giving simple way to access
1661 1668 the revlog instance helps such code.
1662 1669 """
1663 1670
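# Illustrative sketch (not part of the interface above): how a consumer might
# walk an object conforming to ``imanifeststorage``. Only methods documented
# on the protocol are used; the helper name itself is hypothetical.
def _example_walk_manifest_storage(store: imanifeststorage) -> None:
    """Print a summary line for every revision held by a manifest store."""
    for rev in store:                     # __iter__ yields revision numbers
        node = store.node(rev)            # binary node for this revision
        p1, p2 = store.parents(node)      # nullid for missing parents
        text = store.revision(node)       # fulltext manifest data
        print(rev, node.hex(), p1.hex(), p2.hex(), len(text))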
1664 1671
1665 1672 class imanifestlog(Protocol):
1666 1673 """Interface representing a collection of manifest snapshots.
1667 1674
1668 1675 Represents the root manifest in a repository.
1669 1676
1670 1677 Also serves as a means to access nested tree manifests and to cache
1671 1678 tree manifests.
1672 1679 """
1673 1680
1674 1681 nodeconstants: NodeConstants
1675 1682 """nodeconstants used by the current repository."""
1676 1683
1677 1684 narrowed: bool
1678 1685 """True, is the manifest is narrowed by a matcher"""
1679 1686
1680 1687 @abc.abstractmethod
1681 1688 def __getitem__(self, node):
1682 1689 """Obtain a manifest instance for a given binary node.
1683 1690
1684 1691 Equivalent to calling ``self.get('', node)``.
1685 1692
1686 1693 The returned object conforms to the ``imanifestrevisionstored``
1687 1694 interface.
1688 1695 """
1689 1696
1690 1697 @abc.abstractmethod
1691 1698 def get(self, tree, node, verify=True):
1692 1699 """Retrieve the manifest instance for a given directory and binary node.
1693 1700
1694 1701 ``node`` always refers to the node of the root manifest (which will be
1695 1702 the only manifest if flat manifests are being used).
1696 1703
1697 1704 If ``tree`` is the empty string, the root manifest is returned.
1698 1705 Otherwise the manifest for the specified directory will be returned
1699 1706 (requires tree manifests).
1700 1707
1701 1708 If ``verify`` is True, ``LookupError`` is raised if the node is not
1702 1709 known.
1703 1710
1704 1711 The returned object conforms to the ``imanifestrevisionstored``
1705 1712 interface.
1706 1713 """
1707 1714
1708 1715 @abc.abstractmethod
1709 1716 def getstorage(self, tree):
1710 1717 """Retrieve an interface to storage for a particular tree.
1711 1718
1712 1719 If ``tree`` is the empty bytestring, storage for the root manifest will
1713 1720 be returned. Otherwise storage for a tree manifest is returned.
1714 1721
1715 1722 TODO formalize interface for returned object.
1716 1723 """
1717 1724
1718 1725 @abc.abstractmethod
1719 1726 def clearcaches(self, clear_persisted_data: bool = False) -> None:
1720 1727 """Clear caches associated with this collection."""
1721 1728
1722 1729 @abc.abstractmethod
1723 1730 def rev(self, node):
1724 1731 """Obtain the revision number for a binary node.
1725 1732
1726 1733 Raises ``error.LookupError`` if the node is not known.
1727 1734 """
1728 1735
1729 1736 @abc.abstractmethod
1730 1737 def update_caches(self, transaction):
1731 1738 """update whatever cache are relevant for the used storage."""
1732 1739
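# Illustrative sketch (not part of the interface above): typical read access
# through an ``imanifestlog``. The directory path and ``node`` value are
# hypothetical; only the methods documented on the protocol are assumed.
def _example_read_manifestlog(mlog: imanifestlog, node: bytes) -> None:
    """Fetch root and per-directory manifests for a given root node."""
    root = mlog[node]                        # same as mlog.get(b'', node)
    subdir = mlog.get(b'dir/subdir', node)   # requires tree manifests
    storage = mlog.getstorage(b'')           # storage backing the root manifest
    print(type(root), type(subdir), type(storage))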
1733 1740
1734 1741 class ilocalrepositoryfilestorage(Protocol):
1735 1742 """Local repository sub-interface providing access to tracked file storage.
1736 1743
1737 1744 This interface defines how a repository accesses storage for a single
1738 1745 tracked file path.
1739 1746 """
1740 1747
1741 1748 @abc.abstractmethod
1742 1749 def file(self, f):
1743 1750 """Obtain a filelog for a tracked path.
1744 1751
1745 1752 The returned type conforms to the ``ifilestorage`` interface.
1746 1753 """
1747 1754
1748 1755
1749 1756 class ilocalrepositorymain(Protocol):
1750 1757 """Main interface for local repositories.
1751 1758
1752 1759 This currently captures the reality of things - not how things should be.
1753 1760 """
1754 1761
1755 1762 nodeconstants: NodeConstants
1756 1763 """Constant nodes matching the hash function used by the repository."""
1757 1764
1758 1765 nullid: bytes
1759 1766 """null revision for the hash function used by the repository."""
1760 1767
1761 1768 supported: set[bytes]
1762 1769 """Set of requirements that this repo is capable of opening."""
1763 1770
1764 1771 requirements: set[bytes]
1765 1772 """Set of requirements this repo uses."""
1766 1773
1767 1774 features: set[bytes]
1768 1775 """Set of "features" this repository supports.
1769 1776
1770 1777 A "feature" is a loosely-defined term. It can refer to a feature
1771 1778 in the classical sense or can describe an implementation detail
1772 1779 of the repository. For example, a ``readonly`` feature may denote
1773 1780 the repository as read-only. Or a ``revlogfilestore`` feature may
1774 1781 denote that the repository is using revlogs for file storage.
1775 1782
1776 1783 The intent of features is to provide a machine-queryable mechanism
1777 1784 for repo consumers to test for various repository characteristics.
1778 1785
1779 1786 Features are similar to ``requirements``. The main difference is that
1780 1787 requirements are stored on-disk and represent requirements to open the
1781 1788 repository. Features are more run-time capabilities of the repository
1782 1789 and more granular capabilities (which may be derived from requirements).
1783 1790 """
1784 1791
1785 1792 filtername: bytes
1786 1793 """Name of the repoview that is active on this repo."""
1787 1794
1788 1795 vfs_map: Mapping[bytes, Vfs]
1789 1796 """a bytes-key β†’ vfs mapping used by transaction and others"""
1790 1797
1791 1798 wvfs: Vfs
1792 1799 """VFS used to access the working directory."""
1793 1800
1794 1801 vfs: Vfs
1795 1802 """VFS rooted at the .hg directory.
1796 1803
1797 1804 Used to access repository data not in the store.
1798 1805 """
1799 1806
1800 1807 svfs: Vfs
1801 1808 """VFS rooted at the store.
1802 1809
1803 1810 Used to access repository data in the store. Typically .hg/store.
1804 1811 But can point elsewhere if the store is shared.
1805 1812 """
1806 1813
1807 1814 root: bytes
1808 1815 """Path to the root of the working directory."""
1809 1816
1810 1817 path: bytes
1811 1818 """Path to the .hg directory."""
1812 1819
1813 1820 origroot: bytes
1814 1821 """The filesystem path that was used to construct the repo."""
1815 1822
1816 1823 auditor: Any
1817 1824 """A pathauditor for the working directory.
1818 1825
1819 1826 This checks if a path refers to a nested repository.
1820 1827
1821 1828 Operates on the filesystem.
1822 1829 """
1823 1830
1824 1831 nofsauditor: Any # TODO: add type hints
1825 1832 """A pathauditor for the working directory.
1826 1833
1827 1834 This is like ``auditor`` except it doesn't do filesystem checks.
1828 1835 """
1829 1836
1830 1837 baseui: Ui
1831 1838 """Original ui instance passed into constructor."""
1832 1839
1833 1840 ui: Ui
1834 1841 """Main ui instance for this instance."""
1835 1842
1836 1843 sharedpath: bytes
1837 1844 """Path to the .hg directory of the repo this repo was shared from."""
1838 1845
1839 1846 store: Any # TODO: add type hints
1840 1847 """A store instance."""
1841 1848
1842 1849 spath: bytes
1843 1850 """Path to the store."""
1844 1851
1845 1852 sjoin: Callable # TODO: add type hints
1846 1853 """Alias to self.store.join."""
1847 1854
1848 1855 cachevfs: Vfs
1849 1856 """A VFS used to access the cache directory.
1850 1857
1851 1858 Typically .hg/cache.
1852 1859 """
1853 1860
1854 1861 wcachevfs: Vfs
1855 1862 """A VFS used to access the cache directory dedicated to working copy
1856 1863
1857 1864 Typically .hg/wcache.
1858 1865 """
1859 1866
1860 1867 filteredrevcache: Any # TODO: add type hints
1861 1868 """Holds sets of revisions to be filtered."""
1862 1869
1863 1870 names: Any # TODO: add type hints
1864 1871 """A ``namespaces`` instance."""
1865 1872
1866 1873 filecopiesmode: Any # TODO: add type hints
1867 1874 """The way files copies should be dealt with in this repo."""
1868 1875
1869 1876 @abc.abstractmethod
1870 1877 def close(self):
1871 1878 """Close the handle on this repository."""
1872 1879
1873 1880 @abc.abstractmethod
1874 1881 def peer(self, path=None):
1875 1882 """Obtain an object conforming to the ``peer`` interface."""
1876 1883
1877 1884 @abc.abstractmethod
1878 1885 def unfiltered(self):
1879 1886 """Obtain an unfiltered/raw view of this repo."""
1880 1887
1881 1888 @abc.abstractmethod
1882 1889 def filtered(self, name, visibilityexceptions=None):
1883 1890 """Obtain a named view of this repository."""
1884 1891
1885 1892 obsstore: Any # TODO: add type hints
1886 1893 """A store of obsolescence data."""
1887 1894
1888 1895 changelog: Any # TODO: add type hints
1889 1896 """A handle on the changelog revlog."""
1890 1897
1891 1898 manifestlog: imanifestlog
1892 1899 """An instance conforming to the ``imanifestlog`` interface.
1893 1900
1894 1901 Provides access to manifests for the repository.
1895 1902 """
1896 1903
1897 1904 dirstate: intdirstate.idirstate
1898 1905 """Working directory state."""
1899 1906
1900 1907 narrowpats: Any # TODO: add type hints
1901 1908 """Matcher patterns for this repository's narrowspec."""
1902 1909
1903 1910 @abc.abstractmethod
1904 1911 def narrowmatch(self, match=None, includeexact=False):
1905 1912 """Obtain a matcher for the narrowspec."""
1906 1913
1907 1914 @abc.abstractmethod
1908 1915 def setnarrowpats(self, newincludes, newexcludes):
1909 1916 """Define the narrowspec for this repository."""
1910 1917
1911 1918 @abc.abstractmethod
1912 1919 def __getitem__(self, changeid):
1913 1920 """Try to resolve a changectx."""
1914 1921
1915 1922 @abc.abstractmethod
1916 1923 def __contains__(self, changeid):
1917 1924 """Whether a changeset exists."""
1918 1925
1919 1926 @abc.abstractmethod
1920 1927 def __nonzero__(self):
1921 1928 """Always returns True."""
1922 1929 return True
1923 1930
1924 1931 __bool__ = __nonzero__
1925 1932
1926 1933 @abc.abstractmethod
1927 1934 def __len__(self):
1928 1935 """Returns the number of changesets in the repo."""
1929 1936
1930 1937 @abc.abstractmethod
1931 1938 def __iter__(self):
1932 1939 """Iterate over revisions in the changelog."""
1933 1940
1934 1941 @abc.abstractmethod
1935 1942 def revs(self, expr, *args):
1936 1943 """Evaluate a revset.
1937 1944
1938 1945 Emits revisions.
1939 1946 """
1940 1947
1941 1948 @abc.abstractmethod
1942 1949 def set(self, expr, *args):
1943 1950 """Evaluate a revset.
1944 1951
1945 1952 Emits changectx instances.
1946 1953 """
1947 1954
1948 1955 @abc.abstractmethod
1949 1956 def anyrevs(self, specs, user=False, localalias=None):
1950 1957 """Find revisions matching one of the given revsets."""
1951 1958
1952 1959 @abc.abstractmethod
1953 1960 def url(self):
1954 1961 """Returns a string representing the location of this repo."""
1955 1962
1956 1963 @abc.abstractmethod
1957 1964 def hook(self, name, throw=False, **args):
1958 1965 """Call a hook."""
1959 1966
1960 1967 @abc.abstractmethod
1961 1968 def tags(self):
1962 1969 """Return a mapping of tag to node."""
1963 1970
1964 1971 @abc.abstractmethod
1965 1972 def tagtype(self, tagname):
1966 1973 """Return the type of a given tag."""
1967 1974
1968 1975 @abc.abstractmethod
1969 1976 def tagslist(self):
1970 1977 """Return a list of tags ordered by revision."""
1971 1978
1972 1979 @abc.abstractmethod
1973 1980 def nodetags(self, node):
1974 1981 """Return the tags associated with a node."""
1975 1982
1976 1983 @abc.abstractmethod
1977 1984 def nodebookmarks(self, node):
1978 1985 """Return the list of bookmarks pointing to the specified node."""
1979 1986
1980 1987 @abc.abstractmethod
1981 1988 def branchmap(self):
1982 1989 """Return a mapping of branch to heads in that branch."""
1983 1990
1984 1991 @abc.abstractmethod
1985 1992 def revbranchcache(self):
1986 1993 pass
1987 1994
1988 1995 @abc.abstractmethod
1989 1996 def register_changeset(self, rev, changelogrevision):
1990 1997 """Extension point for caches for new nodes.
1991 1998
1992 1999 Multiple consumers are expected to need parts of the changelogrevision,
1993 2000 so it is provided as an optimization to avoid duplicate lookups. A
1994 2001 simple cache would be fragile when other revisions are accessed, too."""
1995 2002 pass
1996 2003
1997 2004 @abc.abstractmethod
1998 2005 def branchtip(self, branchtip, ignoremissing=False):
1999 2006 """Return the tip node for a given branch."""
2000 2007
2001 2008 @abc.abstractmethod
2002 2009 def lookup(self, key):
2003 2010 """Resolve the node for a revision."""
2004 2011
2005 2012 @abc.abstractmethod
2006 2013 def lookupbranch(self, key):
2007 2014 """Look up the branch name of the given revision or branch name."""
2008 2015
2009 2016 @abc.abstractmethod
2010 2017 def known(self, nodes):
2011 2018 """Determine whether a series of nodes is known.
2012 2019
2013 2020 Returns a list of bools.
2014 2021 """
2015 2022
2016 2023 @abc.abstractmethod
2017 2024 def local(self):
2018 2025 """Whether the repository is local."""
2019 2026 return True
2020 2027
2021 2028 @abc.abstractmethod
2022 2029 def publishing(self):
2023 2030 """Whether the repository is a publishing repository."""
2024 2031
2025 2032 @abc.abstractmethod
2026 2033 def cancopy(self):
2027 2034 pass
2028 2035
2029 2036 @abc.abstractmethod
2030 2037 def shared(self):
2031 2038 """The type of shared repository or None."""
2032 2039
2033 2040 @abc.abstractmethod
2034 2041 def wjoin(self, f, *insidef):
2035 2042 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
2036 2043
2037 2044 @abc.abstractmethod
2038 2045 def setparents(self, p1, p2):
2039 2046 """Set the parent nodes of the working directory."""
2040 2047
2041 2048 @abc.abstractmethod
2042 2049 def filectx(self, path, changeid=None, fileid=None):
2043 2050 """Obtain a filectx for the given file revision."""
2044 2051
2045 2052 @abc.abstractmethod
2046 2053 def getcwd(self):
2047 2054 """Obtain the current working directory from the dirstate."""
2048 2055
2049 2056 @abc.abstractmethod
2050 2057 def pathto(self, f, cwd=None):
2051 2058 """Obtain the relative path to a file."""
2052 2059
2053 2060 @abc.abstractmethod
2054 2061 def adddatafilter(self, name, fltr):
2055 2062 pass
2056 2063
2057 2064 @abc.abstractmethod
2058 2065 def wread(self, filename):
2059 2066 """Read a file from wvfs, using data filters."""
2060 2067
2061 2068 @abc.abstractmethod
2062 2069 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2063 2070 """Write data to a file in the wvfs, using data filters."""
2064 2071
2065 2072 @abc.abstractmethod
2066 2073 def wwritedata(self, filename, data):
2067 2074 """Resolve data for writing to the wvfs, using data filters."""
2068 2075
2069 2076 @abc.abstractmethod
2070 2077 def currenttransaction(self):
2071 2078 """Obtain the current transaction instance or None."""
2072 2079
2073 2080 @abc.abstractmethod
2074 2081 def transaction(self, desc, report=None):
2075 2082 """Open a new transaction to write to the repository."""
2076 2083
2077 2084 @abc.abstractmethod
2078 2085 def undofiles(self):
2079 2086 """Returns a list of (vfs, path) for files to undo transactions."""
2080 2087
2081 2088 @abc.abstractmethod
2082 2089 def recover(self):
2083 2090 """Roll back an interrupted transaction."""
2084 2091
2085 2092 @abc.abstractmethod
2086 2093 def rollback(self, dryrun=False, force=False):
2087 2094 """Undo the last transaction.
2088 2095
2089 2096 DANGEROUS.
2090 2097 """
2091 2098
2092 2099 @abc.abstractmethod
2093 2100 def updatecaches(self, tr=None, full=False, caches=None):
2094 2101 """Warm repo caches."""
2095 2102
2096 2103 @abc.abstractmethod
2097 2104 def invalidatecaches(self):
2098 2105 """Invalidate cached data due to the repository mutating."""
2099 2106
2100 2107 @abc.abstractmethod
2101 2108 def invalidatevolatilesets(self):
2102 2109 pass
2103 2110
2104 2111 @abc.abstractmethod
2105 2112 def invalidatedirstate(self):
2106 2113 """Invalidate the dirstate."""
2107 2114
2108 2115 @abc.abstractmethod
2109 2116 def invalidate(self, clearfilecache=False):
2110 2117 pass
2111 2118
2112 2119 @abc.abstractmethod
2113 2120 def invalidateall(self):
2114 2121 pass
2115 2122
2116 2123 @abc.abstractmethod
2117 2124 def lock(self, wait=True):
2118 2125 """Lock the repository store and return a lock instance."""
2119 2126
2120 2127 @abc.abstractmethod
2121 2128 def currentlock(self):
2122 2129 """Return the lock if it's held or None."""
2123 2130
2124 2131 @abc.abstractmethod
2125 2132 def wlock(self, wait=True):
2126 2133 """Lock the non-store parts of the repository."""
2127 2134
2128 2135 @abc.abstractmethod
2129 2136 def currentwlock(self):
2130 2137 """Return the wlock if it's held or None."""
2131 2138
2132 2139 @abc.abstractmethod
2133 2140 def checkcommitpatterns(self, wctx, match, status, fail):
2134 2141 pass
2135 2142
2136 2143 @abc.abstractmethod
2137 2144 def commit(
2138 2145 self,
2139 2146 text=b'',
2140 2147 user=None,
2141 2148 date=None,
2142 2149 match=None,
2143 2150 force=False,
2144 2151 editor=False,
2145 2152 extra=None,
2146 2153 ):
2147 2154 """Add a new revision to the repository."""
2148 2155
2149 2156 @abc.abstractmethod
2150 2157 def commitctx(self, ctx, error=False, origctx=None):
2151 2158 """Commit a commitctx instance to the repository."""
2152 2159
2153 2160 @abc.abstractmethod
2154 2161 def destroying(self):
2155 2162 """Inform the repository that nodes are about to be destroyed."""
2156 2163
2157 2164 @abc.abstractmethod
2158 2165 def destroyed(self):
2159 2166 """Inform the repository that nodes have been destroyed."""
2160 2167
2161 2168 @abc.abstractmethod
2162 2169 def status(
2163 2170 self,
2164 2171 node1=b'.',
2165 2172 node2=None,
2166 2173 match=None,
2167 2174 ignored=False,
2168 2175 clean=False,
2169 2176 unknown=False,
2170 2177 listsubrepos=False,
2171 2178 ):
2172 2179 """Convenience method to call repo[x].status()."""
2173 2180
2174 2181 @abc.abstractmethod
2175 2182 def addpostdsstatus(self, ps):
2176 2183 pass
2177 2184
2178 2185 @abc.abstractmethod
2179 2186 def postdsstatus(self):
2180 2187 pass
2181 2188
2182 2189 @abc.abstractmethod
2183 2190 def clearpostdsstatus(self):
2184 2191 pass
2185 2192
2186 2193 @abc.abstractmethod
2187 2194 def heads(self, start=None):
2188 2195 """Obtain list of nodes that are DAG heads."""
2189 2196
2190 2197 @abc.abstractmethod
2191 2198 def branchheads(self, branch=None, start=None, closed=False):
2192 2199 pass
2193 2200
2194 2201 @abc.abstractmethod
2195 2202 def branches(self, nodes):
2196 2203 pass
2197 2204
2198 2205 @abc.abstractmethod
2199 2206 def between(self, pairs):
2200 2207 pass
2201 2208
2202 2209 @abc.abstractmethod
2203 2210 def checkpush(self, pushop):
2204 2211 pass
2205 2212
2206 2213 prepushoutgoinghooks: util.hooks
2207 2214 """util.hooks instance."""
2208 2215
2209 2216 @abc.abstractmethod
2210 2217 def pushkey(self, namespace, key, old, new):
2211 2218 pass
2212 2219
2213 2220 @abc.abstractmethod
2214 2221 def listkeys(self, namespace):
2215 2222 pass
2216 2223
2217 2224 @abc.abstractmethod
2218 2225 def debugwireargs(self, one, two, three=None, four=None, five=None):
2219 2226 pass
2220 2227
2221 2228 @abc.abstractmethod
2222 2229 def savecommitmessage(self, text):
2223 2230 pass
2224 2231
2225 2232 @abc.abstractmethod
2226 2233 def register_sidedata_computer(
2227 2234 self, kind, category, keys, computer, flags, replace=False
2228 2235 ):
2229 2236 pass
2230 2237
2231 2238 @abc.abstractmethod
2232 2239 def register_wanted_sidedata(self, category):
2233 2240 pass
2234 2241
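# Illustrative sketch (not part of the interface above): common read and write
# patterns against an ``ilocalrepositorymain`` object. The revset, commit
# message, and user are hypothetical; locks are assumed to be usable as
# context managers, as they are in the reference implementation.
def _example_use_local_repo(repo: ilocalrepositorymain) -> None:
    """Exercise a handful of the methods documented above."""
    # Read side: revsets emit revision numbers; __getitem__ resolves a changectx.
    for rev in repo.revs(b'heads(all())'):
        print(rev, repo[rev])

    # Write side: hold the wlock (working copy) and lock (store); commit()
    # opens and closes its own transaction.
    with repo.wlock(), repo.lock():
        repo.commit(text=b'example commit', user=b'example user')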
2235 2242
2236 2243 class completelocalrepository(
2237 2244 ilocalrepositorymain,
2238 2245 ilocalrepositoryfilestorage,
2239 2246 Protocol,
2240 2247 ):
2241 2248 """Complete interface for a local repository."""
2242 2249
2243 2250
2244 2251 class iwireprotocolcommandcacher(Protocol):
2245 2252 """Represents a caching backend for wire protocol commands.
2246 2253
2247 2254 Wire protocol version 2 supports transparent caching of many commands.
2248 2255 To leverage this caching, servers can activate objects that cache
2249 2256 command responses. Objects handle both cache writing and reading.
2250 2257 This interface defines how that response caching mechanism works.
2251 2258
2252 2259 Wire protocol version 2 commands emit a series of objects that are
2253 2260 serialized and sent to the client. The caching layer exists between
2254 2261 the invocation of the command function and the sending of its output
2255 2262 objects to an output layer.
2256 2263
2257 2264 Instances of this interface represent a binding to a cache that
2258 2265 can serve a response (in place of calling a command function) and/or
2259 2266 write responses to a cache for subsequent use.
2260 2267
2261 2268 When a command request arrives, the following happens with regards
2262 2269 to this interface:
2263 2270
2264 2271 1. The server determines whether the command request is cacheable.
2265 2272 2. If it is, an instance of this interface is spawned.
2266 2273 3. The cacher is activated in a context manager (``__enter__`` is called).
2267 2274 4. A cache *key* for that request is derived. This will call the
2268 2275 instance's ``adjustcachekeystate()`` method so the derivation
2269 2276 can be influenced.
2270 2277 5. The cacher is informed of the derived cache key via a call to
2271 2278 ``setcachekey()``.
2272 2279 6. The cacher's ``lookup()`` method is called to test for presence of
2273 2280 the derived key in the cache.
2274 2281 7. If ``lookup()`` returns a hit, that cached result is used in place
2275 2282 of invoking the command function. ``__exit__`` is called and the instance
2276 2283 is discarded.
2277 2284 8. The command function is invoked.
2278 2285 9. ``onobject()`` is called for each object emitted by the command
2279 2286 function.
2280 2287 10. After the final object is seen, ``onfinished()`` is called.
2281 2288 11. ``__exit__`` is called to signal the end of use of the instance.
2282 2289
2283 2290 Cache *key* derivation can be influenced by the instance.
2284 2291
2285 2292 Cache keys are initially derived by a deterministic representation of
2286 2293 the command request. This includes the command name, arguments, protocol
2287 2294 version, etc. This initial key derivation is performed by CBOR-encoding a
2288 2295 data structure and feeding that output into a hasher.
2289 2296
2290 2297 Instances of this interface can influence this initial key derivation
2291 2298 via ``adjustcachekeystate()``.
2292 2299
2293 2300 The instance is informed of the derived cache key via a call to
2294 2301 ``setcachekey()``. The instance must store the key locally so it can
2295 2302 be consulted on subsequent operations that may require it.
2296 2303
2297 2304 When constructed, the instance has access to a callable that can be used
2298 2305 for encoding response objects. This callable receives as its single
2299 2306 argument an object emitted by a command function. It returns an iterable
2300 2307 of bytes chunks representing the encoded object. Unless the cacher is
2301 2308 caching native Python objects in memory or has a way of reconstructing
2302 2309 the original Python objects, implementations typically call this function
2303 2310 to produce bytes from the output objects and then store those bytes in
2304 2311 the cache. When it comes time to re-emit those bytes, they are wrapped
2305 2312 in a ``wireprototypes.encodedresponse`` instance to tell the output
2306 2313 layer that they are pre-encoded.
2307 2314
2308 2315 When receiving the objects emitted by the command function, instances
2309 2316 can choose what to do with those objects. The simplest thing to do is
2310 2317 re-emit the original objects. They will be forwarded to the output
2311 2318 layer and will be processed as if the cacher did not exist.
2312 2319
2313 2320 Implementations could also choose to not emit objects - instead locally
2314 2321 buffering objects or their encoded representation. They could then emit
2315 2322 a single "coalesced" object when ``onfinished()`` is called. In
2316 2323 this way, the implementation would function as a filtering layer of
2317 2324 sorts.
2318 2325
2319 2326 When caching objects, typically the encoded form of the object will
2320 2327 be stored. Keep in mind that if the original object is forwarded to
2321 2328 the output layer, it will need to be encoded there as well. For large
2322 2329 output, this redundant encoding could add overhead. Implementations
2323 2330 could wrap the encoded object data in ``wireprototypes.encodedresponse``
2324 2331 instances to avoid this overhead.
2325 2332 """
2326 2333
2327 2334 @abc.abstractmethod
2328 2335 def __enter__(self):
2329 2336 """Marks the instance as active.
2330 2337
2331 2338 Should return self.
2332 2339 """
2333 2340
2334 2341 @abc.abstractmethod
2335 2342 def __exit__(self, exctype, excvalue, exctb):
2336 2343 """Called when cacher is no longer used.
2337 2344
2338 2345 This can be used by implementations to perform cleanup actions (e.g.
2339 2346 disconnecting network sockets, aborting a partially cached response).
2340 2347 """
2341 2348
2342 2349 @abc.abstractmethod
2343 2350 def adjustcachekeystate(self, state):
2344 2351 """Influences cache key derivation by adjusting state to derive key.
2345 2352
2346 2353 A dict defining the state used to derive the cache key is passed.
2347 2354
2348 2355 Implementations can modify this dict to record additional state that
2349 2356 is wanted to influence key derivation.
2350 2357
2351 2358 Implementations are *highly* encouraged to not modify or delete
2352 2359 existing keys.
2353 2360 """
2354 2361
2355 2362 @abc.abstractmethod
2356 2363 def setcachekey(self, key):
2357 2364 """Record the derived cache key for this request.
2358 2365
2359 2366 Instances may mutate the key for internal usage, as desired. e.g.
2360 2367 instances may wish to prepend the repo name, introduce path
2361 2368 components for filesystem or URL addressing, etc. Behavior is up to
2362 2369 the cache.
2363 2370
2364 2371 Returns a bool indicating if the request is cacheable by this
2365 2372 instance.
2366 2373 """
2367 2374
2368 2375 @abc.abstractmethod
2369 2376 def lookup(self):
2370 2377 """Attempt to resolve an entry in the cache.
2371 2378
2372 2379 The instance is instructed to look for the cache key that it was
2373 2380 informed about via the call to ``setcachekey()``.
2374 2381
2375 2382 If there's no cache hit or the cacher doesn't wish to use the cached
2376 2383 entry, ``None`` should be returned.
2377 2384
2378 2385 Else, a dict defining the cached result should be returned. The
2379 2386 dict may have the following keys:
2380 2387
2381 2388 objs
2382 2389 An iterable of objects that should be sent to the client. That
2383 2390 iterable of objects is expected to be what the command function
2384 2391 would return if invoked or an equivalent representation thereof.
2385 2392 """
2386 2393
2387 2394 @abc.abstractmethod
2388 2395 def onobject(self, obj):
2389 2396 """Called when a new object is emitted from the command function.
2390 2397
2391 2398 Receives as its argument the object that was emitted from the
2392 2399 command function.
2393 2400
2394 2401 This method returns an iterator of objects to forward to the output
2395 2402 layer. The easiest implementation is a generator that just
2396 2403 ``yield obj``.
2397 2404 """
2398 2405
2399 2406 @abc.abstractmethod
2400 2407 def onfinished(self):
2401 2408 """Called after all objects have been emitted from the command function.
2402 2409
2403 2410 Implementations should return an iterator of objects to forward to
2404 2411 the output layer.
2405 2412
2406 2413 This method can be a generator.
2407 2414 """