interfaces: make the `peer` mixin not a Protocol to fix Python 3.10 failures...
Matt Harbison -
r53403:199b0e62 default
@@ -1,2406 +1,2407
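The commit message above is truncated, so the exact Python 3.10 failure is not shown here; as a rough, non-authoritative sketch of the pattern involved (class names below are illustrative, not Mercurial's), a mixin that both derives from typing.Protocol and carries real ``__init__`` logic has been fragile on 3.10, which is presumably why ``peer`` drops Protocol until 3.11 becomes the floor (see the TODO added in the diff below):

from typing import Protocol

class ConnectionMixin(Protocol):        # hypothetical stand-in for ``peer``
    def __init__(self, ui):
        self.ui = ui                    # real initialization logic in the mixin

class ConcretePeer(ConnectionMixin):    # concrete class relying on the mixin
    pass

p = ConcretePeer(ui=object())
# On Python 3.10 the Protocol machinery rewrites protocol ``__init__``s, and
# keeping real initialization in a Protocol base has been a source of
# version-specific breakage; making the mixin a plain class avoids that.
print(getattr(p, 'ui', '<unset>'))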
1 1 # repository.py - Interfaces and base classes for repositories and peers.
2 2 # coding: utf-8
3 3 #
4 4 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import annotations
10 10
11 11 import abc
12 12 import typing
13 13
14 14 from typing import (
15 15 Any,
16 16 Callable,
17 17 Collection,
18 18 Iterable,
19 19 Iterator,
20 20 Mapping,
21 21 Protocol,
22 22 Set,
23 23 )
24 24
25 25 from ..i18n import _
26 26 from .. import error
27 27
28 28 if typing.TYPE_CHECKING:
29 29 from typing import (
30 30 ByteString, # TODO: change to Buffer for 3.14
31 31 )
32 32
33 33 # Almost all mercurial modules are only imported in the type checking phase
34 34 # to avoid circular imports
35 35 from .. import (
36 36 match as matchmod,
37 37 pathutil,
38 38 util,
39 39 )
40 40 from ..utils import (
41 41 urlutil,
42 42 )
43 43
44 44 from . import dirstate as intdirstate
45 45
46 46 # TODO: make a protocol class for this
47 47 NodeConstants = Any
48 48
49 49 # TODO: create a Protocol class, since importing uimod here causes a cycle
50 50 # that confuses pytype.
51 51 Ui = Any
52 52
53 53 # TODO: make a protocol class for this
54 54 Vfs = Any
55 55
56 56 # Local repository feature string.
57 57
58 58 # Revlogs are being used for file storage.
59 59 REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
60 60 # The storage part of the repository is shared from an external source.
61 61 REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
62 62 # LFS supported for backing file storage.
63 63 REPO_FEATURE_LFS = b'lfs'
64 64 # Repository supports being stream cloned.
65 65 REPO_FEATURE_STREAM_CLONE = b'streamclone'
66 66 # Repository supports (at least) some sidedata to be stored
67 67 REPO_FEATURE_SIDE_DATA = b'side-data'
68 68 # Files storage may lack data for all ancestors.
69 69 REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'
70 70
71 71 REVISION_FLAG_CENSORED = 1 << 15
72 72 REVISION_FLAG_ELLIPSIS = 1 << 14
73 73 REVISION_FLAG_EXTSTORED = 1 << 13
74 74 REVISION_FLAG_HASCOPIESINFO = 1 << 12
75 75
76 76 REVISION_FLAGS_KNOWN = (
77 77 REVISION_FLAG_CENSORED
78 78 | REVISION_FLAG_ELLIPSIS
79 79 | REVISION_FLAG_EXTSTORED
80 80 | REVISION_FLAG_HASCOPIESINFO
81 81 )
82 82
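Illustrative only (the helper below is hypothetical, not part of this module): the ``REVISION_FLAG_*`` values are bit positions meant to be combined and tested with bitwise operators, and ``REVISION_FLAGS_KNOWN`` is the mask of everything defined here.

def describe_flags(flags: int) -> list:
    # hypothetical helper: reject unknown bits, then name the known ones
    unknown = flags & ~REVISION_FLAGS_KNOWN
    if unknown:
        raise ValueError("unknown revision flags: 0x%x" % unknown)
    names = []
    if flags & REVISION_FLAG_CENSORED:
        names.append(b'censored')
    if flags & REVISION_FLAG_ELLIPSIS:
        names.append(b'ellipsis')
    if flags & REVISION_FLAG_EXTSTORED:
        names.append(b'extstored')
    if flags & REVISION_FLAG_HASCOPIESINFO:
        names.append(b'hascopiesinfo')
    return names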
83 83 CG_DELTAMODE_STD = b'default'
84 84 CG_DELTAMODE_PREV = b'previous'
85 85 CG_DELTAMODE_FULL = b'fulltext'
86 86 CG_DELTAMODE_P1 = b'p1'
87 87
88 88
89 89 ## Cache related constants:
90 90 #
91 91 # Used to control which cache should be warmed in a repo.updatecaches(…) call.
92 92
93 93 # Warm branchmaps of all known repoview's filter-level
94 94 CACHE_BRANCHMAP_ALL = b"branchmap-all"
95 95 # Warm branchmaps of repoview's filter-level used by server
96 96 CACHE_BRANCHMAP_SERVED = b"branchmap-served"
97 97 # Warm internal changelog cache (eg: persistent nodemap)
98 98 CACHE_CHANGELOG_CACHE = b"changelog-cache"
99 99 # check if a branchmap can use the "pure topo" mode
100 100 CACHE_BRANCHMAP_DETECT_PURE_TOPO = b"branchmap-detect-pure-topo"
101 101 # Warm full manifest cache
102 102 CACHE_FULL_MANIFEST = b"full-manifest"
103 103 # Warm file-node-tags cache
104 104 CACHE_FILE_NODE_TAGS = b"file-node-tags"
105 105 # Warm internal manifestlog cache (eg: persistent nodemap)
106 106 CACHE_MANIFESTLOG_CACHE = b"manifestlog-cache"
107 107 # Warm rev branch cache
108 108 CACHE_REV_BRANCH = b"rev-branch-cache"
109 109 # Warm tags' cache for the default repoview
110 110 CACHE_TAGS_DEFAULT = b"tags-default"
111 111 # Warm tags' cache for repoview's filter-level used by server
112 112 CACHE_TAGS_SERVED = b"tags-served"
113 113
114 114 # the caches to warm by default after a simple transaction
115 115 # (this is a mutable set to let extensions update it)
116 116 CACHES_DEFAULT = {
117 117 CACHE_BRANCHMAP_SERVED,
118 118 }
119 119
120 120 # the caches to warm when warming all of them
121 121 # (this is a mutable set to let extensions update it)
122 122 CACHES_ALL = {
123 123 CACHE_BRANCHMAP_SERVED,
124 124 CACHE_BRANCHMAP_ALL,
125 125 CACHE_BRANCHMAP_DETECT_PURE_TOPO,
126 126 CACHE_REV_BRANCH,
127 127 CACHE_CHANGELOG_CACHE,
128 128 CACHE_FILE_NODE_TAGS,
129 129 CACHE_FULL_MANIFEST,
130 130 CACHE_MANIFESTLOG_CACHE,
131 131 CACHE_TAGS_DEFAULT,
132 132 CACHE_TAGS_SERVED,
133 133 }
134 134
135 135 # the caches to warm by default after a clone
136 136 # (this is a mutable set to let extensions update it)
137 137 CACHES_POST_CLONE = CACHES_ALL.copy()
138 138 CACHES_POST_CLONE.discard(CACHE_FILE_NODE_TAGS)
139 139 CACHES_POST_CLONE.discard(CACHE_REV_BRANCH)
140 140
141 141
142 142 class _ipeerconnection(Protocol):
143 143 """Represents a "connection" to a repository.
144 144
145 145 This is the base interface for representing a connection to a repository.
146 146 It holds basic properties and methods applicable to all peer types.
147 147
148 148 This is not a complete interface definition and should not be used
149 149 outside of this module.
150 150 """
151 151
152 152 ui: Ui
153 153 """ui.ui instance"""
154 154
155 155 path: urlutil.path | None
156 156 """a urlutil.path instance or None"""
157 157
158 158 @abc.abstractmethod
159 159 def url(self):
160 160 """Returns a URL string representing this peer.
161 161
162 162 Currently, implementations expose the raw URL used to construct the
163 163 instance. It may contain credentials as part of the URL. The
164 164 expectations of the value aren't well-defined and this could lead to
165 165 data leakage.
166 166
167 167 TODO audit/clean consumers and more clearly define the contents of this
168 168 value.
169 169 """
170 170
171 171 @abc.abstractmethod
172 172 def local(self):
173 173 """Returns a local repository instance.
174 174
175 175 If the peer represents a local repository, returns an object that
176 176 can be used to interface with it. Otherwise returns ``None``.
177 177 """
178 178
179 179 @abc.abstractmethod
180 180 def canpush(self):
181 181 """Returns a boolean indicating if this peer can be pushed to."""
182 182
183 183 @abc.abstractmethod
184 184 def close(self):
185 185 """Close the connection to this peer.
186 186
187 187 This is called when the peer will no longer be used. Resources
188 188 associated with the peer should be cleaned up.
189 189 """
190 190
191 191
192 192 class ipeercapabilities(Protocol):
193 193 """Peer sub-interface related to capabilities."""
194 194
195 195 @abc.abstractmethod
196 196 def capable(self, name):
197 197 """Determine support for a named capability.
198 198
199 199 Returns ``False`` if capability not supported.
200 200
201 201 Returns ``True`` if boolean capability is supported. Returns a string
202 202 if capability support is non-boolean.
203 203
204 204 Capability strings may or may not map to wire protocol capabilities.
205 205 """
206 206
207 207 @abc.abstractmethod
208 208 def requirecap(self, name, purpose):
209 209 """Require a capability to be present.
210 210
211 211 Raises a ``CapabilityError`` if the capability isn't present.
212 212 """
213 213
214 214
215 215 class ipeercommands(Protocol):
216 216 """Client-side interface for communicating over the wire protocol.
217 217
218 218 This interface is used as a gateway to the Mercurial wire protocol.
219 219 Methods commonly call wire protocol commands of the same name.
220 220 """
221 221
222 222 @abc.abstractmethod
223 223 def branchmap(self):
224 224 """Obtain heads in named branches.
225 225
226 226 Returns a dict mapping branch name to an iterable of nodes that are
227 227 heads on that branch.
228 228 """
229 229
230 230 @abc.abstractmethod
231 231 def capabilities(self):
232 232 """Obtain capabilities of the peer.
233 233
234 234 Returns a set of string capabilities.
235 235 """
236 236
237 237 @abc.abstractmethod
238 238 def get_cached_bundle_inline(self, path):
239 239 """Retrieve a clonebundle across the wire.
240 240
241 241 Returns a chunkbuffer
242 242 """
243 243
244 244 @abc.abstractmethod
245 245 def clonebundles(self):
246 246 """Obtains the clone bundles manifest for the repo.
247 247
248 248 Returns the manifest as unparsed bytes.
249 249 """
250 250
251 251 @abc.abstractmethod
252 252 def debugwireargs(self, one, two, three=None, four=None, five=None):
253 253 """Used to facilitate debugging of arguments passed over the wire."""
254 254
255 255 @abc.abstractmethod
256 256 def getbundle(self, source, **kwargs):
257 257 """Obtain remote repository data as a bundle.
258 258
259 259 This command is how the bulk of repository data is transferred from
260 260 the peer to the local repository.
261 261
262 262 Returns a generator of bundle data.
263 263 """
264 264
265 265 @abc.abstractmethod
266 266 def heads(self):
267 267 """Determine all known head revisions in the peer.
268 268
269 269 Returns an iterable of binary nodes.
270 270 """
271 271
272 272 @abc.abstractmethod
273 273 def known(self, nodes):
274 274 """Determine whether multiple nodes are known.
275 275
276 276 Accepts an iterable of nodes whose presence to check for.
277 277
278 278 Returns an iterable of booleans indicating whether the corresponding node
279 279 at that index is known to the peer.
280 280 """
281 281
282 282 @abc.abstractmethod
283 283 def listkeys(self, namespace):
284 284 """Obtain all keys in a pushkey namespace.
285 285
286 286 Returns an iterable of key names.
287 287 """
288 288
289 289 @abc.abstractmethod
290 290 def lookup(self, key):
291 291 """Resolve a value to a known revision.
292 292
293 293 Returns a binary node of the resolved revision on success.
294 294 """
295 295
296 296 @abc.abstractmethod
297 297 def pushkey(self, namespace, key, old, new):
298 298 """Set a value using the ``pushkey`` protocol.
299 299
300 300 Arguments correspond to the pushkey namespace and key to operate on and
301 301 the old and new values for that key.
302 302
303 303 Returns a string with the peer result. The value inside varies by the
304 304 namespace.
305 305 """
306 306
307 307 @abc.abstractmethod
308 308 def stream_out(self):
309 309 """Obtain streaming clone data.
310 310
311 311 Successful result should be a generator of data chunks.
312 312 """
313 313
314 314 @abc.abstractmethod
315 315 def unbundle(self, bundle, heads, url):
316 316 """Transfer repository data to the peer.
317 317
318 318 This is how the bulk of data during a push is transferred.
319 319
320 320 Returns the integer number of heads added to the peer.
321 321 """
322 322
323 323
324 324 class ipeerlegacycommands(Protocol):
325 325 """Interface for implementing support for legacy wire protocol commands.
326 326
327 327 Wire protocol commands transition to legacy status when they are no longer
328 328 used by modern clients. To facilitate identifying which commands are
329 329 legacy, the interfaces are split.
330 330 """
331 331
332 332 @abc.abstractmethod
333 333 def between(self, pairs):
334 334 """Obtain nodes between pairs of nodes.
335 335
336 336 ``pairs`` is an iterable of node pairs.
337 337
338 338 Returns an iterable of iterables of nodes corresponding to each
339 339 requested pair.
340 340 """
341 341
342 342 @abc.abstractmethod
343 343 def branches(self, nodes):
344 344 """Obtain ancestor changesets of specific nodes back to a branch point.
345 345
346 346 For each requested node, the peer finds the first ancestor node that is
347 347 a DAG root or is a merge.
348 348
349 349 Returns an iterable of iterables with the resolved values for each node.
350 350 """
351 351
352 352 @abc.abstractmethod
353 353 def changegroup(self, nodes, source):
354 354 """Obtain a changegroup with data for descendants of specified nodes."""
355 355
356 356 @abc.abstractmethod
357 357 def changegroupsubset(self, bases, heads, source):
358 358 pass
359 359
360 360
361 361 class ipeercommandexecutor(Protocol):
362 362 """Represents a mechanism to execute remote commands.
363 363
364 364 This is the primary interface for requesting that wire protocol commands
365 365 be executed. Instances of this interface are active in a context manager
366 366 and have a well-defined lifetime. When the context manager exits, all
367 367 outstanding requests are waited on.
368 368 """
369 369
370 370 @abc.abstractmethod
371 371 def callcommand(self, name, args):
372 372 """Request that a named command be executed.
373 373
374 374 Receives the command name and a dictionary of command arguments.
375 375
376 376 Returns a ``concurrent.futures.Future`` that will resolve to the
377 377 result of that command request. That exact value is left up to
378 378 the implementation and possibly varies by command.
379 379
380 380 Not all commands can coexist with other commands in an executor
381 381 instance: it depends on the underlying wire protocol transport being
382 382 used and the command itself.
383 383
384 384 Implementations MAY call ``sendcommands()`` automatically if the
385 385 requested command can not coexist with other commands in this executor.
386 386
387 387 Implementations MAY call ``sendcommands()`` automatically when the
388 388 future's ``result()`` is called. So, consumers using multiple
389 389 commands with an executor MUST ensure that ``result()`` is not called
390 390 until all command requests have been issued.
391 391 """
392 392
393 393 @abc.abstractmethod
394 394 def sendcommands(self):
395 395 """Trigger submission of queued command requests.
396 396
397 397 Not all transports submit commands as soon as they are requested to
398 398 run. When called, this method forces queued command requests to be
399 399 issued. It will no-op if all commands have already been sent.
400 400
401 401 When called, no more new commands may be issued with this executor.
402 402 """
403 403
404 404 @abc.abstractmethod
405 405 def close(self):
406 406 """Signal that this command request is finished.
407 407
408 408 When called, no more new commands may be issued. All outstanding
409 409 commands that have previously been issued are waited on before
410 410 returning. This not only includes waiting for the futures to resolve,
411 411 but also waiting for all response data to arrive. In other words,
412 412 calling this waits for all on-wire state for issued command requests
413 413 to finish.
414 414
415 415 When used as a context manager, this method is called when exiting the
416 416 context manager.
417 417
418 418 This method may call ``sendcommands()`` if there are buffered commands.
419 419 """
420 420
421 421
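A usage sketch, assuming ``remote`` is an already-connected object implementing these peer interfaces; the command names are ordinary wire protocol commands, and ``result()`` is only called after the executor exits, as the docstrings above require.

def fetch_summary(remote):
    # queue two commands on one executor (a single round trip where supported)
    with remote.commandexecutor() as e:
        f_heads = e.callcommand(b'heads', {})
        f_marks = e.callcommand(b'listkeys', {b'namespace': b'bookmarks'})
    # after the context manager exits, all futures have resolved
    return f_heads.result(), f_marks.result()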
422 422 class ipeerrequests(Protocol):
423 423 """Interface for executing commands on a peer."""
424 424
425 425 limitedarguments: bool
426 426 """True if the peer cannot receive large argument values for commands."""
427 427
428 428 @abc.abstractmethod
429 429 def commandexecutor(self):
430 430 """A context manager that resolves to an ipeercommandexecutor.
431 431
432 432 The object this resolves to can be used to issue command requests
433 433 to the peer.
434 434
435 435 Callers should call its ``callcommand`` method to issue command
436 436 requests.
437 437
438 438 A new executor should be obtained for each distinct set of commands
439 439 (possibly just a single command) that the consumer wants to execute
440 440 as part of a single operation or round trip. This is because some
441 441 peers are half-duplex and/or don't support persistent connections.
442 442 e.g. in the case of HTTP peers, commands sent to an executor represent
443 443 a single HTTP request. While some peers may support multiple command
444 444 sends over the wire per executor, consumers need to code to the least
445 445 capable peer. So it should be assumed that command executors buffer
446 446 called commands until they are told to send them and that each
447 447 command executor could result in a new connection or wire-level request
448 448 being issued.
449 449 """
450 450
451 451
452 class peer(_ipeerconnection, ipeercapabilities, ipeerrequests, Protocol):
452 # TODO: make this a Protocol class when 3.11 is the minimum supported version?
453 class peer(_ipeerconnection, ipeercapabilities, ipeerrequests):
453 454 """Unified interface for peer repositories.
454 455
455 456 All peer instances must conform to this interface.
456 457 """
457 458
458 459 limitedarguments: bool = False
459 460
460 461 def __init__(self, ui, path=None, remotehidden=False):
461 462 self.ui = ui
462 463 self.path = path
463 464
464 465 def capable(self, name):
465 466 # TODO: this class should maybe subclass ipeercommands too, otherwise it
466 467 # is assuming whatever uses this as a mixin also has this interface.
467 468 caps = self.capabilities() # pytype: disable=attribute-error
468 469 if name in caps:
469 470 return True
470 471
471 472 name = b'%s=' % name
472 473 for cap in caps:
473 474 if cap.startswith(name):
474 475 return cap[len(name) :]
475 476
476 477 return False
477 478
478 479 def requirecap(self, name, purpose):
479 480 if self.capable(name):
480 481 return
481 482
482 483 raise error.CapabilityError(
483 484 _(
484 485 b'cannot %s; remote repository does not support the '
485 486 b'\'%s\' capability'
486 487 )
487 488 % (purpose, name)
488 489 )
489 490
490 491
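Illustrative only: a toy subclass showing how the ``capable()`` helper above interprets the capability set; real peers are built by the repo and wire protocol machinery, and the capability names here are just examples.

class demopeer(peer):
    # minimal concrete implementations so the abstract interface is satisfied
    def url(self): return b'demo://peer'
    def local(self): return None
    def canpush(self): return False
    def close(self): pass
    def commandexecutor(self): raise NotImplementedError
    def capabilities(self):
        return {b'branchmap', b'bundle2=HG20%0A'}

p = demopeer(ui=None)
assert p.capable(b'branchmap') is True         # boolean capability
assert p.capable(b'bundle2') == b'HG20%0A'     # value after '=' is returned
assert p.capable(b'unbundle') is False         # absent capability
p.requirecap(b'branchmap', b'demo')            # no-op when present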
491 492 class iverifyproblem(Protocol):
492 493 """Represents a problem with the integrity of the repository.
493 494
494 495 Instances of this interface are emitted to describe an integrity issue
495 496 with a repository (e.g. corrupt storage, missing data, etc).
496 497
497 498 Instances are essentially messages associated with severity.
498 499 """
499 500
500 501 warning: bytes | None
501 502 """Message indicating a non-fatal problem."""
502 503
503 504 error: bytes | None
504 505 """Message indicating a fatal problem."""
505 506
506 507 node: bytes | None
507 508 """Revision encountering the problem.
508 509
509 510 ``None`` means the problem doesn't apply to a single revision.
510 511 """
511 512
512 513
513 514 class irevisiondelta(Protocol):
514 515 """Represents a delta between one revision and another.
515 516
516 517 Instances convey enough information to allow a revision to be exchanged
517 518 with another repository.
518 519
519 520 Instances represent the fulltext revision data or a delta against
520 521 another revision. Therefore the ``revision`` and ``delta`` attributes
521 522 are mutually exclusive.
522 523
523 524 Typically used for changegroup generation.
524 525 """
525 526
526 527 node: bytes
527 528 """20 byte node of this revision."""
528 529
529 530 p1node: bytes
530 531 """20 byte node of 1st parent of this revision."""
531 532
532 533 p2node: bytes
533 534 """20 byte node of 2nd parent of this revision."""
534 535
535 536 # TODO: is this really optional? revlog.revlogrevisiondelta defaults to None
536 537 linknode: bytes | None
537 538 """20 byte node of the changelog revision this node is linked to."""
538 539
539 540 flags: int
540 541 """2 bytes of integer flags that apply to this revision.
541 542
542 543 This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
543 544 """
544 545
545 546 basenode: bytes
546 547 """20 byte node of the revision this data is a delta against.
547 548
548 549 ``nullid`` indicates that the revision is a full revision and not
549 550 a delta.
550 551 """
551 552
552 553 baserevisionsize: int | None
553 554 """Size of base revision this delta is against.
554 555
555 556 May be ``None`` if ``basenode`` is ``nullid``.
556 557 """
557 558
558 559 # TODO: is this really optional? (Seems possible in
559 560 # storageutil.emitrevisions()).
560 561 revision: bytes | None
561 562 """Raw fulltext of revision data for this node."""
562 563
563 564 delta: bytes | None
564 565 """Delta between ``basenode`` and ``node``.
565 566
566 567 Stored in the bdiff delta format.
567 568 """
568 569
569 570 sidedata: bytes | None
570 571 """Raw sidedata bytes for the given revision."""
571 572
572 573 protocol_flags: int
573 574 """Single byte of integer flags that can influence the protocol.
574 575
575 576 This is a bitwise composition of the ``storageutil.CG_FLAG*`` constants.
576 577 """
577 578
578 579
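A consumer-side sketch of the ``revision``/``delta`` exclusivity described above; it assumes ``mercurial.mdiff.patch`` for applying a bdiff-format delta (as used elsewhere in the codebase) and a caller-provided callback for looking up base fulltexts.

from mercurial import mdiff

def resolve_fulltext(rd, get_fulltext):
    # ``rd`` conforms to irevisiondelta; exactly one of revision/delta is set
    if rd.revision is not None:
        return rd.revision
    base = get_fulltext(rd.basenode)       # caller resolves the base fulltext
    return mdiff.patch(base, rd.delta)     # apply the bdiff-format delta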
579 580 class ifilerevisionssequence(Protocol):
580 581 """Contains index data for all revisions of a file.
581 582
582 583 Types implementing this behave like lists of tuples. The index
583 584 in the list corresponds to the revision number. The values contain
584 585 index metadata.
585 586
586 587 The *null* revision (revision number -1) is always the last item
587 588 in the index.
588 589 """
589 590
590 591 @abc.abstractmethod
591 592 def __len__(self):
592 593 """The total number of revisions."""
593 594
594 595 @abc.abstractmethod
595 596 def __getitem__(self, rev):
596 597 """Returns the object having a specific revision number.
597 598
598 599 Returns an 8-tuple with the following fields:
599 600
600 601 offset+flags
601 602 Contains the offset and flags for the revision. 64-bit unsigned
602 603 integer where first 6 bytes are the offset and the next 2 bytes
603 604 are flags. The offset can be 0 if it is not used by the store.
604 605 compressed size
605 606 Size of the revision data in the store. It can be 0 if it isn't
606 607 needed by the store.
607 608 uncompressed size
608 609 Fulltext size. It can be 0 if it isn't needed by the store.
609 610 base revision
610 611 Revision number of revision the delta for storage is encoded
611 612 against. -1 indicates not encoded against a base revision.
612 613 link revision
613 614 Revision number of changelog revision this entry is related to.
614 615 p1 revision
615 616 Revision number of 1st parent. -1 if no 1st parent.
616 617 p2 revision
617 618 Revision number of 2nd parent. -1 if no 2nd parent.
618 619 node
619 620 Binary node value for this revision number.
620 621
621 622 Negative values should index off the end of the sequence. ``-1``
622 623 should return the null revision. ``-2`` should return the most
623 624 recent revision.
624 625 """
625 626
626 627 @abc.abstractmethod
627 628 def __contains__(self, rev):
628 629 """Whether a revision number exists."""
629 630
630 631 @abc.abstractmethod
631 632 def insert(self, i, entry):
632 633 """Add an item to the index at specific revision."""
633 634
634 635
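Illustrative decoding of the 8-tuple documented in ``__getitem__`` above (``index`` is any object conforming to ``ifilerevisionssequence``; the helper is hypothetical).

def entry_fields(index, rev):
    (offset_flags, comp_size, uncomp_size, base_rev,
     link_rev, p1_rev, p2_rev, node) = index[rev]
    offset = offset_flags >> 16       # upper 6 bytes: offset within the store
    flags = offset_flags & 0xFFFF     # lower 2 bytes: REVISION_FLAG_* bits
    return (offset, flags, comp_size, uncomp_size,
            base_rev, link_rev, p1_rev, p2_rev, node)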
635 636 class ifileindex(Protocol):
636 637 """Storage interface for index data of a single file.
637 638
638 639 File storage data is divided into index metadata and data storage.
639 640 This interface defines the index portion of the interface.
640 641
641 642 The index logically consists of:
642 643
643 644 * A mapping between revision numbers and nodes.
644 645 * DAG data (storing and querying the relationship between nodes).
645 646 * Metadata to facilitate storage.
646 647 """
647 648
648 649 nullid: bytes
649 650 """node for the null revision for use as delta base."""
650 651
651 652 @abc.abstractmethod
652 653 def __len__(self) -> int:
653 654 """Obtain the number of revisions stored for this file."""
654 655
655 656 @abc.abstractmethod
656 657 def __iter__(self) -> Iterator[int]:
657 658 """Iterate over revision numbers for this file."""
658 659
659 660 @abc.abstractmethod
660 661 def hasnode(self, node):
661 662 """Returns a bool indicating if a node is known to this store.
662 663
663 664 Implementations must only return True for full, binary node values:
664 665 hex nodes, revision numbers, and partial node matches must be
665 666 rejected.
666 667
667 668 The null node is never present.
668 669 """
669 670
670 671 @abc.abstractmethod
671 672 def revs(self, start=0, stop=None):
672 673 """Iterate over revision numbers for this file, with control."""
673 674
674 675 @abc.abstractmethod
675 676 def parents(self, node):
676 677 """Returns a 2-tuple of parent nodes for a revision.
677 678
678 679 Values will be ``nullid`` if the parent is empty.
679 680 """
680 681
681 682 @abc.abstractmethod
682 683 def parentrevs(self, rev):
683 684 """Like parents() but operates on revision numbers."""
684 685
685 686 @abc.abstractmethod
686 687 def rev(self, node):
687 688 """Obtain the revision number given a node.
688 689
689 690 Raises ``error.LookupError`` if the node is not known.
690 691 """
691 692
692 693 @abc.abstractmethod
693 694 def node(self, rev):
694 695 """Obtain the node value given a revision number.
695 696
696 697 Raises ``IndexError`` if the node is not known.
697 698 """
698 699
699 700 @abc.abstractmethod
700 701 def lookup(self, node):
701 702 """Attempt to resolve a value to a node.
702 703
703 704 Value can be a binary node, hex node, revision number, or a string
704 705 that can be converted to an integer.
705 706
706 707 Raises ``error.LookupError`` if a node could not be resolved.
707 708 """
708 709
709 710 @abc.abstractmethod
710 711 def linkrev(self, rev):
711 712 """Obtain the changeset revision number a revision is linked to."""
712 713
713 714 @abc.abstractmethod
714 715 def iscensored(self, rev):
715 716 """Return whether a revision's content has been censored."""
716 717
717 718 @abc.abstractmethod
718 719 def commonancestorsheads(self, node1, node2):
719 720 """Obtain an iterable of nodes containing heads of common ancestors.
720 721
721 722 See ``ancestor.commonancestorsheads()``.
722 723 """
723 724
724 725 @abc.abstractmethod
725 726 def descendants(self, revs):
726 727 """Obtain descendant revision numbers for a set of revision numbers.
727 728
728 729 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
729 730 """
730 731
731 732 @abc.abstractmethod
732 733 def heads(self, start=None, stop=None):
733 734 """Obtain a list of nodes that are DAG heads, with control.
734 735
735 736 The set of revisions examined can be limited by specifying
736 737 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
737 738 iterable of nodes. DAG traversal starts at earlier revision
738 739 ``start`` and iterates forward until any node in ``stop`` is
739 740 encountered.
740 741 """
741 742
742 743 @abc.abstractmethod
743 744 def children(self, node):
744 745 """Obtain nodes that are children of a node.
745 746
746 747 Returns a list of nodes.
747 748 """
748 749
749 750
750 751 class ifiledata(Protocol):
751 752 """Storage interface for data storage of a specific file.
752 753
753 754 This complements ``ifileindex`` and provides an interface for accessing
754 755 data for a tracked file.
755 756 """
756 757
757 758 @abc.abstractmethod
758 759 def size(self, rev):
759 760 """Obtain the fulltext size of file data.
760 761
761 762 Any metadata is excluded from size measurements.
762 763 """
763 764
764 765 @abc.abstractmethod
765 766 def revision(self, node):
766 767 """Obtain fulltext data for a node.
767 768
768 769 By default, any storage transformations are applied before the data
769 770 is returned. If ``raw`` is True, non-raw storage transformations
770 771 are not applied.
771 772
772 773 The fulltext data may contain a header containing metadata. Most
773 774 consumers should use ``read()`` to obtain the actual file data.
774 775 """
775 776
776 777 @abc.abstractmethod
777 778 def rawdata(self, node):
778 779 """Obtain raw data for a node."""
779 780
780 781 @abc.abstractmethod
781 782 def read(self, node):
782 783 """Resolve file fulltext data.
783 784
784 785 This is similar to ``revision()`` except any metadata in the data
785 786 headers is stripped.
786 787 """
787 788
788 789 @abc.abstractmethod
789 790 def renamed(self, node):
790 791 """Obtain copy metadata for a node.
791 792
792 793 Returns ``False`` if no copy metadata is stored or a 2-tuple of
793 794 (path, node) from which this revision was copied.
794 795 """
795 796
796 797 @abc.abstractmethod
797 798 def cmp(self, node, fulltext):
798 799 """Compare fulltext to another revision.
799 800
800 801 Returns True if the fulltext is different from what is stored.
801 802
802 803 This takes copy metadata into account.
803 804
804 805 TODO better document the copy metadata and censoring logic.
805 806 """
806 807
807 808 @abc.abstractmethod
808 809 def emitrevisions(
809 810 self,
810 811 nodes,
811 812 nodesorder=None,
812 813 revisiondata=False,
813 814 assumehaveparentrevisions=False,
814 815 deltamode=CG_DELTAMODE_STD,
815 816 ):
816 817 """Produce ``irevisiondelta`` for revisions.
817 818
818 819 Given an iterable of nodes, emits objects conforming to the
819 820 ``irevisiondelta`` interface that describe revisions in storage.
820 821
821 822 This method is a generator.
822 823
823 824 The input nodes may be unordered. Implementations must ensure that a
824 825 node's parents are emitted before the node itself. Transitively, this
825 826 means that a node may only be emitted once all its ancestors in
826 827 ``nodes`` have also been emitted.
827 828
828 829 By default, emits "index" data (the ``node``, ``p1node``, and
829 830 ``p2node`` attributes). If ``revisiondata`` is set, revision data
830 831 will also be present on the emitted objects.
831 832
832 833 With default argument values, implementations can choose to emit
833 834 either fulltext revision data or a delta. When emitting deltas,
834 835 implementations must consider whether the delta's base revision
835 836 fulltext is available to the receiver.
836 837
837 838 The base revision fulltext is guaranteed to be available if any of
838 839 the following are met:
839 840
840 841 * Its fulltext revision was emitted by this method call.
841 842 * A delta for that revision was emitted by this method call.
842 843 * ``assumehaveparentrevisions`` is True and the base revision is a
843 844 parent of the node.
844 845
845 846 ``nodesorder`` can be used to control the order that revisions are
846 847 emitted. By default, revisions can be reordered as long as they are
847 848 in DAG topological order (see above). If the value is ``nodes``,
848 849 the iteration order from ``nodes`` should be used. If the value is
849 850 ``storage``, then the native order from the backing storage layer
850 851 is used. (Not all storage layers will have strong ordering and behavior
851 852 of this mode is storage-dependent.) ``nodes`` ordering can force
852 853 revisions to be emitted before their ancestors, so consumers should
853 854 use it with care.
854 855
855 856 The ``linknode`` attribute on the returned ``irevisiondelta`` may not
856 857 be set and it is the caller's responsibility to resolve it, if needed.
857 858
858 859 If ``deltamode`` is CG_DELTAMODE_PREV and revision data is requested,
859 860 all revision data should be emitted as deltas against the revision
860 861 emitted just prior. The initial revision should be a delta against its
861 862 1st parent.
862 863 """
863 864
864 865
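A consumer sketch for ``emitrevisions()`` as documented above (``store`` conforms to ``ifiledata``; the helper and its output format are hypothetical).

def collect_revisions(store, nodes):
    out = []
    for rd in store.emitrevisions(
        nodes, revisiondata=True, deltamode=CG_DELTAMODE_STD
    ):
        # parents are emitted before children, so deltas can be applied in
        # order; ``rd.revision`` and ``rd.delta`` are mutually exclusive
        payload = rd.revision if rd.revision is not None else rd.delta
        out.append((rd.node, rd.basenode, payload))
    return out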
865 866 class ifilemutation(Protocol):
866 867 """Storage interface for mutation events of a tracked file."""
867 868
868 869 @abc.abstractmethod
869 870 def add(self, filedata, meta, transaction, linkrev, p1, p2):
870 871 """Add a new revision to the store.
871 872
872 873 Takes file data, dictionary of metadata, a transaction, linkrev,
873 874 and parent nodes.
874 875
875 876 Returns the node that was added.
876 877
877 878 May no-op if a revision matching the supplied data is already stored.
878 879 """
879 880
880 881 @abc.abstractmethod
881 882 def addrevision(
882 883 self,
883 884 revisiondata,
884 885 transaction,
885 886 linkrev,
886 887 p1,
887 888 p2,
888 889 node=None,
889 890 flags=0,
890 891 cachedelta=None,
891 892 ):
892 893 """Add a new revision to the store and return its number.
893 894
894 895 This is similar to ``add()`` except it operates at a lower level.
895 896
896 897 The data passed in already contains a metadata header, if any.
897 898
898 899 ``node`` and ``flags`` can be used to define the expected node and
899 900 the flags to use with storage. ``flags`` is a bitwise value composed
900 901 of the various ``REVISION_FLAG_*`` constants.
901 902
902 903 ``add()`` is usually called when adding files from e.g. the working
903 904 directory. ``addrevision()`` is often called by ``add()`` and for
904 905 scenarios where revision data has already been computed, such as when
905 906 applying raw data from a peer repo.
906 907 """
907 908
908 909 @abc.abstractmethod
909 910 def addgroup(
910 911 self,
911 912 deltas,
912 913 linkmapper,
913 914 transaction,
914 915 addrevisioncb=None,
915 916 duplicaterevisioncb=None,
916 917 maybemissingparents=False,
917 918 ):
918 919 """Process a series of deltas for storage.
919 920
920 921 ``deltas`` is an iterable of 7-tuples of
921 922 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
922 923 to add.
923 924
924 925 The ``delta`` field contains ``mpatch`` data to apply to a base
925 926 revision, identified by ``deltabase``. The base node can be
926 927 ``nullid``, in which case the header from the delta can be ignored
927 928 and the delta used as the fulltext.
928 929
929 930 ``alwayscache`` instructs the lower layers to cache the content of the
930 931 newly added revision, even if it needs to be explicitly computed.
931 932 This used to be the default when ``addrevisioncb`` was provided up to
932 933 Mercurial 5.8.
933 934
934 935 ``addrevisioncb`` should be called for each new rev as it is committed.
935 936 ``duplicaterevisioncb`` should be called for all revs with a
936 937 pre-existing node.
937 938
938 939 ``maybemissingparents`` is a bool indicating whether the incoming
939 940 data may reference parents/ancestor revisions that aren't present.
940 941 This flag is set when receiving data into a "shallow" store that
941 942 doesn't hold all history.
942 943
943 944 Returns a list of nodes that were processed. A node will be in the list
944 945 even if it existed in the store previously.
945 946 """
946 947
947 948 @abc.abstractmethod
948 949 def censorrevision(self, tr, node, tombstone=b''):
949 950 """Remove the content of a single revision.
950 951
951 952 The specified ``node`` will have its content purged from storage.
952 953 Future attempts to access the revision data for this node will
953 954 result in failure.
954 955
955 956 A ``tombstone`` message can optionally be stored. This message may be
956 957 displayed to users when they attempt to access the missing revision
957 958 data.
958 959
959 960 Storage backends may have stored deltas against the previous content
960 961 in this revision. As part of censoring a revision, these storage
961 962 backends are expected to rewrite any internally stored deltas such
962 963 that they no longer reference the deleted content.
963 964 """
964 965
965 966 @abc.abstractmethod
966 967 def getstrippoint(self, minlink):
967 968 """Find the minimum revision that must be stripped to strip a linkrev.
968 969
969 970 Returns a 2-tuple containing the minimum revision number and a set
970 971 of all revision numbers that would be broken by this strip.
971 972
972 973 TODO this is highly revlog centric and should be abstracted into
973 974 a higher-level deletion API. ``repair.strip()`` relies on this.
974 975 """
975 976
976 977 @abc.abstractmethod
977 978 def strip(self, minlink, transaction):
978 979 """Remove storage of items starting at a linkrev.
979 980
980 981 This uses ``getstrippoint()`` to determine the first node to remove.
981 982 Then it effectively truncates storage for all revisions after that.
982 983
983 984 TODO this is highly revlog centric and should be abstracted into a
984 985 higher-level deletion API.
985 986 """
986 987
987 988
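Illustrative shape of a single ``deltas`` entry for ``addgroup()`` as documented above; the byte values are placeholders, not real nodes.

nullid = b'\0' * 20                    # null node (normally from nodeconstants)
delta_entry = (
    b'\x11' * 20,                      # node being added (placeholder)
    nullid, nullid,                    # p1, p2
    b'\x22' * 20,                      # linknode (placeholder)
    nullid,                            # deltabase; nullid => payload is fulltext
    b'file contents\n',                # delta payload (fulltext here)
    0,                                 # REVISION_FLAG_* bitfield
)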
988 989 class ifilestorage(ifileindex, ifiledata, ifilemutation, Protocol):
989 990 """Complete storage interface for a single tracked file."""
990 991
991 992 @abc.abstractmethod
992 993 def files(self):
993 994 """Obtain paths that are backing storage for this file.
994 995
995 996 TODO this is used heavily by verify code and there should probably
996 997 be a better API for that.
997 998 """
998 999
999 1000 @abc.abstractmethod
1000 1001 def storageinfo(
1001 1002 self,
1002 1003 exclusivefiles=False,
1003 1004 sharedfiles=False,
1004 1005 revisionscount=False,
1005 1006 trackedsize=False,
1006 1007 storedsize=False,
1007 1008 ):
1008 1009 """Obtain information about storage for this file's data.
1009 1010
1010 1011 Returns a dict describing storage for this tracked path. The keys
1011 1012 in the dict map to arguments of the same. The arguments are bools
1012 1013 indicating whether to calculate and obtain that data.
1013 1014
1014 1015 exclusivefiles
1015 1016 Iterable of (vfs, path) describing files that are exclusively
1016 1017 used to back storage for this tracked path.
1017 1018
1018 1019 sharedfiles
1019 1020 Iterable of (vfs, path) describing files that are used to back
1020 1021 storage for this tracked path. Those files may also provide storage
1021 1022 for other stored entities.
1022 1023
1023 1024 revisionscount
1024 1025 Number of revisions available for retrieval.
1025 1026
1026 1027 trackedsize
1027 1028 Total size in bytes of all tracked revisions. This is a sum of the
1028 1029 length of the fulltext of all revisions.
1029 1030
1030 1031 storedsize
1031 1032 Total size in bytes used to store data for all tracked revisions.
1032 1033 This is commonly less than ``trackedsize`` due to internal usage
1033 1034 of deltas rather than fulltext revisions.
1034 1035
1035 1036 Not all storage backends may support all queries or have a reasonable
1036 1037 value to use. In that case, the value should be set to ``None`` and
1037 1038 callers are expected to handle this special value.
1038 1039 """
1039 1040
1040 1041 @abc.abstractmethod
1041 1042 def verifyintegrity(self, state) -> Iterable[iverifyproblem]:
1042 1043 """Verifies the integrity of file storage.
1043 1044
1044 1045 ``state`` is a dict holding state of the verifier process. It can be
1045 1046 used to communicate data between invocations of multiple storage
1046 1047 primitives.
1047 1048
1048 1049 If individual revisions cannot have their revision content resolved,
1049 1050 the method is expected to set the ``skipread`` key to a set of nodes
1050 1051 that encountered problems. If set, the method can also add the node(s)
1051 1052 to ``safe_renamed`` in order to indicate nodes that may perform the
1052 1053 rename checks with currently accessible data.
1053 1054
1054 1055 The method yields objects conforming to the ``iverifyproblem``
1055 1056 interface.
1056 1057 """
1057 1058
1058 1059
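A consumer sketch for ``verifyintegrity()`` as documented above (``fl`` conforms to ``ifilestorage`` and ``write`` is any bytes-accepting logger); the ``state`` keys shown are the ones named in the docstring, but the exact layout is up to the verifier.

def report_problems(fl, write):
    state = {b'skipread': set(), b'safe_renamed': set()}
    for problem in fl.verifyintegrity(state):
        if problem.error is not None:
            write(b'error: %s\n' % problem.error)
        elif problem.warning is not None:
            write(b'warning: %s\n' % problem.warning)
    return state[b'skipread']          # nodes whose content could not be read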
1059 1060 class idirs(Protocol):
1060 1061 """Interface representing a collection of directories from paths.
1061 1062
1062 1063 This interface is essentially a derived data structure representing
1063 1064 directories from a collection of paths.
1064 1065 """
1065 1066
1066 1067 @abc.abstractmethod
1067 1068 def addpath(self, path):
1068 1069 """Add a path to the collection.
1069 1070
1070 1071 All directories in the path will be added to the collection.
1071 1072 """
1072 1073
1073 1074 @abc.abstractmethod
1074 1075 def delpath(self, path):
1075 1076 """Remove a path from the collection.
1076 1077
1077 1078 If the removal was the last path in a particular directory, the
1078 1079 directory is removed from the collection.
1079 1080 """
1080 1081
1081 1082 @abc.abstractmethod
1082 1083 def __iter__(self):
1083 1084 """Iterate over the directories in this collection of paths."""
1084 1085
1085 1086 @abc.abstractmethod
1086 1087 def __contains__(self, path):
1087 1088 """Whether a specific directory is in this collection."""
1088 1089
1089 1090
1090 1091 class imanifestdict(Protocol):
1091 1092 """Interface representing a manifest data structure.
1092 1093
1093 1094 A manifest is effectively a dict mapping paths to entries. Each entry
1094 1095 consists of a binary node and extra flags affecting that entry.
1095 1096 """
1096 1097
1097 1098 @abc.abstractmethod
1098 1099 def __getitem__(self, key: bytes) -> bytes:
1099 1100 """Returns the binary node value for a path in the manifest.
1100 1101
1101 1102 Raises ``KeyError`` if the path does not exist in the manifest.
1102 1103
1103 1104 Equivalent to ``self.find(path)[0]``.
1104 1105 """
1105 1106
1106 1107 @abc.abstractmethod
1107 1108 def find(self, path: bytes) -> tuple[bytes, bytes]:
1108 1109 """Returns the entry for a path in the manifest.
1109 1110
1110 1111 Returns a 2-tuple of (node, flags).
1111 1112
1112 1113 Raises ``KeyError`` if the path does not exist in the manifest.
1113 1114 """
1114 1115
1115 1116 @abc.abstractmethod
1116 1117 def __len__(self) -> int:
1117 1118 """Return the number of entries in the manifest."""
1118 1119
1119 1120 @abc.abstractmethod
1120 1121 def __nonzero__(self) -> bool:
1121 1122 """Returns True if the manifest has entries, False otherwise."""
1122 1123
1123 1124 __bool__ = __nonzero__
1124 1125
1125 1126 @abc.abstractmethod
1126 1127 def set(self, path: bytes, node: bytes, flags: bytes) -> None:
1127 1128 """Define the node value and flags for a path in the manifest.
1128 1129
1129 1130 Equivalent to __setitem__ followed by setflag, but can be more efficient.
1130 1131 """
1131 1132
1132 1133 @abc.abstractmethod
1133 1134 def __setitem__(self, path: bytes, node: bytes) -> None:
1134 1135 """Define the node value for a path in the manifest.
1135 1136
1136 1137 If the path is already in the manifest, its flags will be copied to
1137 1138 the new entry.
1138 1139 """
1139 1140
1140 1141 @abc.abstractmethod
1141 1142 def __contains__(self, path: bytes) -> bool:
1142 1143 """Whether a path exists in the manifest."""
1143 1144
1144 1145 @abc.abstractmethod
1145 1146 def __delitem__(self, path: bytes) -> None:
1146 1147 """Remove a path from the manifest.
1147 1148
1148 1149 Raises ``KeyError`` if the path is not in the manifest.
1149 1150 """
1150 1151
1151 1152 @abc.abstractmethod
1152 1153 def __iter__(self) -> Iterator[bytes]:
1153 1154 """Iterate over paths in the manifest."""
1154 1155
1155 1156 @abc.abstractmethod
1156 1157 def iterkeys(self) -> Iterator[bytes]:
1157 1158 """Iterate over paths in the manifest."""
1158 1159
1159 1160 @abc.abstractmethod
1160 1161 def keys(self) -> list[bytes]:
1161 1162 """Obtain a list of paths in the manifest."""
1162 1163
1163 1164 @abc.abstractmethod
1164 1165 def filesnotin(self, other, match=None) -> Set[bytes]:
1165 1166 """Obtain the set of paths in this manifest but not in another.
1166 1167
1167 1168 ``match`` is an optional matcher function to be applied to both
1168 1169 manifests.
1169 1170
1170 1171 Returns a set of paths.
1171 1172 """
1172 1173
1173 1174 @abc.abstractmethod
1174 1175 def dirs(self) -> pathutil.dirs:
1175 1176 """Returns an object implementing the ``idirs`` interface."""
1176 1177
1177 1178 @abc.abstractmethod
1178 1179 def hasdir(self, dir: bytes) -> bool:
1179 1180 """Returns a bool indicating if a directory is in this manifest."""
1180 1181
1181 1182 @abc.abstractmethod
1182 1183 def walk(self, match: matchmod.basematcher) -> Iterator[bytes]:
1183 1184 """Generator of paths in manifest satisfying a matcher.
1184 1185
1185 1186 If the matcher has explicit files listed and they don't exist in
1186 1187 the manifest, ``match.bad()`` is called for each missing file.
1187 1188 """
1188 1189
1189 1190 @abc.abstractmethod
1190 1191 def diff(
1191 1192 self,
1192 1193 other: Any, # TODO: 'manifestdict' or (better) equivalent interface
1193 1194 match: matchmod.basematcher | None = None,
1194 1195 clean: bool = False,
1195 1196 ) -> dict[
1196 1197 bytes,
1197 1198 tuple[tuple[bytes | None, bytes], tuple[bytes | None, bytes]] | None,
1198 1199 ]:
1199 1200 """Find differences between this manifest and another.
1200 1201
1201 1202 This manifest is compared to ``other``.
1202 1203
1203 1204 If ``match`` is provided, the two manifests are filtered against this
1204 1205 matcher and only entries satisfying the matcher are compared.
1205 1206
1206 1207 If ``clean`` is True, unchanged files are included in the returned
1207 1208 object.
1208 1209
1209 1210 Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
1210 1211 the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
1211 1212 represents the node and flags for this manifest and ``(node2, flag2)``
1212 1213 are the same for the other manifest.
1213 1214 """
1214 1215
1215 1216 @abc.abstractmethod
1216 1217 def setflag(self, path: bytes, flag: bytes) -> None:
1217 1218 """Set the flag value for a given path.
1218 1219
1219 1220 Raises ``KeyError`` if the path is not already in the manifest.
1220 1221 """
1221 1222
1222 1223 @abc.abstractmethod
1223 1224 def get(self, path: bytes, default=None) -> bytes | None:
1224 1225 """Obtain the node value for a path or a default value if missing."""
1225 1226
1226 1227 @abc.abstractmethod
1227 1228 def flags(self, path: bytes) -> bytes:
1228 1229 """Return the flags value for a path (default: empty bytestring)."""
1229 1230
1230 1231 @abc.abstractmethod
1231 1232 def copy(self) -> 'imanifestdict':
1232 1233 """Return a copy of this manifest."""
1233 1234
1234 1235 @abc.abstractmethod
1235 1236 def items(self) -> Iterator[tuple[bytes, bytes]]:
1236 1237 """Returns an iterable of (path, node) for items in this manifest."""
1237 1238
1238 1239 @abc.abstractmethod
1239 1240 def iteritems(self) -> Iterator[tuple[bytes, bytes]]:
1240 1241 """Identical to items()."""
1241 1242
1242 1243 @abc.abstractmethod
1243 1244 def iterentries(self) -> Iterator[tuple[bytes, bytes, bytes]]:
1244 1245 """Returns an iterable of (path, node, flags) for this manifest.
1245 1246
1246 1247 Similar to ``iteritems()`` except items are a 3-tuple and include
1247 1248 flags.
1248 1249 """
1249 1250
1250 1251 @abc.abstractmethod
1251 1252 def text(self) -> ByteString:
1252 1253 """Obtain the raw data representation for this manifest.
1253 1254
1254 1255 Result is used to create a manifest revision.
1255 1256 """
1256 1257
1257 1258 @abc.abstractmethod
1258 1259 def fastdelta(
1259 1260 self, base: ByteString, changes: Iterable[tuple[bytes, bool]]
1260 1261 ) -> tuple[ByteString, ByteString]:
1261 1262 """Obtain a delta between this manifest and another given changes.
1262 1263
1263 1264 ``base`` is the raw data representation for another manifest.
1264 1265
1265 1266 ``changes`` is an iterable of ``(path, to_delete)``.
1266 1267
1267 1268 Returns a 2-tuple containing ``bytearray(self.text())`` and the
1268 1269 delta between ``base`` and this manifest.
1269 1270
1270 1271 If this manifest implementation can't support ``fastdelta()``,
1271 1272 raise ``mercurial.manifest.FastdeltaUnavailable``.
1272 1273 """
1273 1274
1274 1275
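A consumer sketch for ``imanifestdict.diff()`` as documented above (``m1`` and ``m2`` conform to ``imanifestdict``; the helper is hypothetical).

def summarize_diff(m1, m2):
    added, removed, modified = [], [], []
    for path, entry in m1.diff(m2).items():
        if entry is None:              # clean file (only when clean=True)
            continue
        (n1, _fl1), (n2, _fl2) = entry
        if n1 is None:
            added.append(path)         # only present in ``m2``
        elif n2 is None:
            removed.append(path)       # only present in ``m1``
        else:
            modified.append(path)
    return added, removed, modified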
1275 1276 class imanifestrevisionbase(Protocol):
1276 1277 """Base interface representing a single revision of a manifest.
1277 1278
1278 1279 Should not be used as a primary interface: should always be inherited
1279 1280 as part of a larger interface.
1280 1281 """
1281 1282
1282 1283 @abc.abstractmethod
1283 1284 def copy(self):
1284 1285 """Obtain a copy of this manifest instance.
1285 1286
1286 1287 Returns an object conforming to the ``imanifestrevisionwritable``
1287 1288 interface. The instance will be associated with the same
1288 1289 ``imanifestlog`` collection as this instance.
1289 1290 """
1290 1291
1291 1292 @abc.abstractmethod
1292 1293 def read(self):
1293 1294 """Obtain the parsed manifest data structure.
1294 1295
1295 1296 The returned object conforms to the ``imanifestdict`` interface.
1296 1297 """
1297 1298
1298 1299
1299 1300 class imanifestrevisionstored(imanifestrevisionbase, Protocol):
1300 1301 """Interface representing a manifest revision committed to storage."""
1301 1302
1302 1303 @abc.abstractmethod
1303 1304 def node(self) -> bytes:
1304 1305 """The binary node for this manifest."""
1305 1306
1306 1307 parents: list[bytes]
1307 1308 """List of binary nodes that are parents for this manifest revision."""
1308 1309
1309 1310 @abc.abstractmethod
1310 1311 def readdelta(self, shallow: bool = False):
1311 1312 """Obtain the manifest data structure representing changes from parent.
1312 1313
1313 1314 This manifest is compared to its 1st parent. A new manifest
1314 1315 representing those differences is constructed.
1315 1316
1316 1317 If `shallow` is True, this will read the delta for this directory,
1317 1318 without recursively reading subdirectory manifests. Instead, any
1318 1319 subdirectory entry will be reported as it appears in the manifest, i.e.
1319 1320 the subdirectory will be reported among files and distinguished only by
1320 1321 its 't' flag. This only applies if the underlying manifest supports it.
1321 1322
1322 1323 The returned object conforms to the ``imanifestdict`` interface.
1323 1324 """
1324 1325
1325 1326 @abc.abstractmethod
1326 1327 def read_any_fast_delta(
1327 1328 self,
1328 1329 valid_bases: Collection[int] | None = None,
1329 1330 *,
1330 1331 shallow: bool = False,
1331 1332 ):
1332 1333 """Read some manifest information as fast as possible.
1333 1334
1334 1335 This might return a "delta", a manifest object containing only the files
1335 1336 changed compared to another revision. The `valid_bases` argument
1336 1337 controls the set of revisions that might be used as a base.
1337 1338
1338 1339 If no delta can be retrieved quickly, a full read of the manifest will
1339 1340 be performed instead.
1340 1341
1341 1342 The function returns a tuple with two elements. The first one is the
1342 1343 delta base used (or None if we did a full read), the second one is the
1343 1344 manifest information.
1344 1345
1345 1346 If `shallow` is True, this will read the delta for this directory,
1346 1347 without recursively reading subdirectory manifests. Instead, any
1347 1348 subdirectory entry will be reported as it appears in the manifest, i.e.
1348 1349 the subdirectory will be reported among files and distinguished only by
1349 1350 its 't' flag. This only applies if the underlying manifest supports it.
1350 1351
1351 1352 The returned object conforms to the ``imanifestdict`` interface.
1352 1353 """
1353 1354
1354 1355 @abc.abstractmethod
1355 1356 def read_delta_parents(self, *, shallow: bool = False, exact: bool = True):
1356 1357 """return a diff from this revision against both parents.
1357 1358
1358 1359 If `exact` is False, this might return a superset of the diff, containing
1359 1360 files that are actually present as is in one of the parents.
1360 1361
1361 1362 If `shallow` is True, this will read the delta for this directory,
1362 1363 without recursively reading subdirectory manifests. Instead, any
1363 1364 subdirectory entry will be reported as it appears in the manifest, i.e.
1364 1365 the subdirectory will be reported among files and distinguished only by
1365 1366 its 't' flag. This only applies if the underlying manifest supports it.
1366 1367
1367 1368 The returned object conforms to the ``imanifestdict`` interface."""
1368 1369
1369 1370 @abc.abstractmethod
1370 1371 def read_delta_new_entries(self, *, shallow: bool = False):
1371 1372 """Return a manifest containing just the entries that might be new to
1372 1373 the repository.
1373 1374
1374 1375 This is often equivalent to a diff against both parents, but without
1375 1376 guarantee. For performance reasons, it might contain more files in some cases.
1376 1377
1377 1378 If `shallow` is True, this will read the delta for this directory,
1378 1379 without recursively reading subdirectory manifests. Instead, any
1379 1380 subdirectory entry will be reported as it appears in the manifest, i.e.
1380 1381 the subdirectory will be reported among files and distinguished only by
1381 1382 its 't' flag. This only applies if the underlying manifest supports it.
1382 1383
1383 1384 The returned object conforms to the ``imanifestdict`` interface."""
1384 1385
1385 1386 @abc.abstractmethod
1386 1387 def readfast(self, shallow: bool = False):
1387 1388 """Calls either ``read()`` or ``readdelta()``.
1388 1389
1389 1390 The faster of the two options is called.
1390 1391 """
1391 1392
1392 1393 @abc.abstractmethod
1393 1394 def find(self, key: bytes) -> tuple[bytes, bytes]:
1394 1395 """Calls ``self.read().find(key)``.
1395 1396
1396 1397 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
1397 1398 """
1398 1399
1399 1400
1400 1401 class imanifestrevisionwritable(imanifestrevisionbase, Protocol):
1401 1402 """Interface representing a manifest revision that can be committed."""
1402 1403
1403 1404 @abc.abstractmethod
1404 1405 def write(
1405 1406 self, transaction, linkrev, p1node, p2node, added, removed, match=None
1406 1407 ):
1407 1408 """Add this revision to storage.
1408 1409
1409 1410 Takes a transaction object, the changeset revision number it will
1410 1411 be associated with, its parent nodes, and lists of added and
1411 1412 removed paths.
1412 1413
1413 1414 If match is provided, storage can choose not to inspect or write out
1414 1415 items that do not match. Storage is still required to be able to provide
1415 1416 the full manifest in the future for any directories written (these
1416 1417 manifests should not be "narrowed on disk").
1417 1418
1418 1419 Returns the binary node of the created revision.
1419 1420 """
1420 1421
1421 1422
1422 1423 class imanifeststorage(Protocol):
1423 1424 """Storage interface for manifest data."""
1424 1425
1425 1426 nodeconstants: NodeConstants
1426 1427 """nodeconstants used by the current repository."""
1427 1428
1428 1429 tree: bytes
1429 1430 """The path to the directory this manifest tracks.
1430 1431
1431 1432 The empty bytestring represents the root manifest.
1432 1433 """
1433 1434
1434 1435 index: ifilerevisionssequence
1435 1436 """An ``ifilerevisionssequence`` instance."""
1436 1437
1437 1438 opener: Vfs
1438 1439 """VFS opener to use to access underlying files used for storage.
1439 1440
1440 1441 TODO this is revlog specific and should not be exposed.
1441 1442 """
1442 1443
1443 1444 # TODO: finish type hints
1444 1445 fulltextcache: dict
1445 1446 """Dict with cache of fulltexts.
1446 1447
1447 1448 TODO this doesn't feel appropriate for the storage interface.
1448 1449 """
1449 1450
1450 1451 @abc.abstractmethod
1451 1452 def __len__(self):
1452 1453 """Obtain the number of revisions stored for this manifest."""
1453 1454
1454 1455 @abc.abstractmethod
1455 1456 def __iter__(self):
1456 1457 """Iterate over revision numbers for this manifest."""
1457 1458
1458 1459 @abc.abstractmethod
1459 1460 def rev(self, node):
1460 1461 """Obtain the revision number given a binary node.
1461 1462
1462 1463 Raises ``error.LookupError`` if the node is not known.
1463 1464 """
1464 1465
1465 1466 @abc.abstractmethod
1466 1467 def node(self, rev):
1467 1468 """Obtain the node value given a revision number.
1468 1469
1469 1470 Raises ``error.LookupError`` if the revision is not known.
1470 1471 """
1471 1472
1472 1473 @abc.abstractmethod
1473 1474 def lookup(self, value):
1474 1475 """Attempt to resolve a value to a node.
1475 1476
1476 1477 Value can be a binary node, hex node, revision number, or a bytes
1477 1478 that can be converted to an integer.
1478 1479
1479 1480 Raises ``error.LookupError`` if a node could not be resolved.
1480 1481 """
1481 1482
1482 1483 @abc.abstractmethod
1483 1484 def parents(self, node):
1484 1485 """Returns a 2-tuple of parent nodes for a node.
1485 1486
1486 1487 Values will be ``nullid`` if the parent is empty.
1487 1488 """
1488 1489
1489 1490 @abc.abstractmethod
1490 1491 def parentrevs(self, rev):
1491 1492 """Like parents() but operates on revision numbers."""
1492 1493
1493 1494 @abc.abstractmethod
1494 1495 def linkrev(self, rev):
1495 1496 """Obtain the changeset revision number a revision is linked to."""
1496 1497
1497 1498 @abc.abstractmethod
1498 1499 def revision(self, node):
1499 1500 """Obtain fulltext data for a node."""
1500 1501
1501 1502 @abc.abstractmethod
1502 1503 def rawdata(self, node):
1503 1504 """Obtain raw data for a node."""
1504 1505
1505 1506 @abc.abstractmethod
1506 1507 def revdiff(self, rev1, rev2):
1507 1508 """Obtain a delta between two revision numbers.
1508 1509
1509 1510 The returned data is the result of ``bdiff.bdiff()`` on the raw
1510 1511 revision data.
1511 1512 """
1512 1513
1513 1514 @abc.abstractmethod
1514 1515 def cmp(self, node, fulltext):
1515 1516 """Compare fulltext to another revision.
1516 1517
1517 1518 Returns True if the fulltext is different from what is stored.
1518 1519 """
1519 1520
1520 1521 @abc.abstractmethod
1521 1522 def emitrevisions(
1522 1523 self,
1523 1524 nodes,
1524 1525 nodesorder=None,
1525 1526 revisiondata=False,
1526 1527 assumehaveparentrevisions=False,
1527 1528 ):
1528 1529 """Produce ``irevisiondelta`` describing revisions.
1529 1530
1530 1531 See the documentation for ``ifiledata`` for more.
1531 1532 """
1532 1533
1533 1534 @abc.abstractmethod
1534 1535 def addgroup(
1535 1536 self,
1536 1537 deltas,
1537 1538 linkmapper,
1538 1539 transaction,
1539 1540 addrevisioncb=None,
1540 1541 duplicaterevisioncb=None,
1541 1542 ):
1542 1543 """Process a series of deltas for storage.
1543 1544
1544 1545 See the documentation in ``ifilemutation`` for more.
1545 1546 """
1546 1547
1547 1548 @abc.abstractmethod
1548 1549 def rawsize(self, rev):
1549 1550 """Obtain the size of tracked data.
1550 1551
1551 1552 Is equivalent to ``len(m.rawdata(node))``.
1552 1553
1553 1554 TODO this method is only used by upgrade code and may be removed.
1554 1555 """
1555 1556
1556 1557 @abc.abstractmethod
1557 1558 def getstrippoint(self, minlink):
1558 1559 """Find minimum revision that must be stripped to strip a linkrev.
1559 1560
1560 1561 See the documentation in ``ifilemutation`` for more.
1561 1562 """
1562 1563
1563 1564 @abc.abstractmethod
1564 1565 def strip(self, minlink, transaction):
1565 1566 """Remove storage of items starting at a linkrev.
1566 1567
1567 1568 See the documentation in ``ifilemutation`` for more.
1568 1569 """
1569 1570
1570 1571 @abc.abstractmethod
1571 1572 def checksize(self):
1572 1573 """Obtain the expected sizes of backing files.
1573 1574
1574 1575 TODO this is used by verify and it should not be part of the interface.
1575 1576 """
1576 1577
1577 1578 @abc.abstractmethod
1578 1579 def files(self):
1579 1580 """Obtain paths that are backing storage for this manifest.
1580 1581
1581 1582 TODO this is used by verify and there should probably be a better API
1582 1583 for this functionality.
1583 1584 """
1584 1585
1585 1586 @abc.abstractmethod
1586 1587 def deltaparent(self, rev):
1587 1588 """Obtain the revision that a revision is delta'd against.
1588 1589
1589 1590 TODO delta encoding is an implementation detail of storage and should
1590 1591 not be exposed to the storage interface.
1591 1592 """
1592 1593
1593 1594 @abc.abstractmethod
1594 1595 def clone(self, tr, dest, **kwargs):
1595 1596 """Clone this instance to another."""
1596 1597
1597 1598 @abc.abstractmethod
1598 1599 def clearcaches(self, clear_persisted_data=False):
1599 1600 """Clear any caches associated with this instance."""
1600 1601
1601 1602 @abc.abstractmethod
1602 1603 def dirlog(self, d):
1603 1604 """Obtain a manifest storage instance for a tree."""
1604 1605
1605 1606 @abc.abstractmethod
1606 1607 def add(
1607 1608 self,
1608 1609 m,
1609 1610 transaction,
1610 1611 link,
1611 1612 p1,
1612 1613 p2,
1613 1614 added,
1614 1615 removed,
1615 1616 readtree=None,
1616 1617 match=None,
1617 1618 ):
1618 1619 """Add a revision to storage.
1619 1620
1620 1621 ``m`` is an object conforming to ``imanifestdict``.
1621 1622
1622 1623 ``link`` is the linkrev revision number.
1623 1624
1624 1625 ``p1`` and ``p2`` are the parent revision numbers.
1625 1626
1626 1627 ``added`` and ``removed`` are iterables of added and removed paths,
1627 1628 respectively.
1628 1629
1629 1630 ``readtree`` is a function that can be used to read the child tree(s)
1630 1631 when recursively writing the full tree structure when using
1631 1632 treemanifests.
1632 1633
1633 1634 ``match`` is a matcher that can be used to hint to storage that not all
1634 1635 paths must be inspected; this is an optimization and can be safely
1635 1636 ignored. Note that the storage must still be able to reproduce a full
1636 1637 manifest including files that did not match.
1637 1638 """
1638 1639
1639 1640 @abc.abstractmethod
1640 1641 def storageinfo(
1641 1642 self,
1642 1643 exclusivefiles=False,
1643 1644 sharedfiles=False,
1644 1645 revisionscount=False,
1645 1646 trackedsize=False,
1646 1647 storedsize=False,
1647 1648 ):
1648 1649 """Obtain information about storage for this manifest's data.
1649 1650
1650 1651 See ``ifilestorage.storageinfo()`` for a description of this method.
1651 1652 This one behaves the same way, except for manifest data.
1652 1653 """
1653 1654
1654 1655 @abc.abstractmethod
1655 1656 def get_revlog(self):
1656 1657 """return an actual revlog instance if any
1657 1658
1658 1659 This exists because a lot of code leverages the fact that the underlying
1659 1660 storage is a revlog for optimization, so giving a simple way to access
1660 1661 the revlog instance helps such code.
1661 1662 """
1662 1663
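# An illustrative sketch (not part of the upstream interface): one way a
# consumer might walk the ancestry of a manifest node using only the methods
# documented above (``rev()``, ``node()`` and ``parentrevs()``).  ``store`` is
# assumed to be any object conforming to this storage interface; the null
# revision is assumed to be -1, as with revlog-based storage.
def _example_manifest_ancestors(store, node, nullrev=-1):
    """Yield ``node`` and the nodes of all of its ancestors."""
    seen = set()
    pending = [store.rev(node)]
    while pending:
        rev = pending.pop()
        if rev == nullrev or rev in seen:
            continue
        seen.add(rev)
        yield store.node(rev)
        for parent in store.parentrevs(rev):
            pending.append(parent)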
1663 1664
1664 1665 class imanifestlog(Protocol):
1665 1666 """Interface representing a collection of manifest snapshots.
1666 1667
1667 1668 Represents the root manifest in a repository.
1668 1669
1669 1670 Also serves as a means to access nested tree manifests and to cache
1670 1671 tree manifests.
1671 1672 """
1672 1673
1673 1674 nodeconstants: NodeConstants
1674 1675 """nodeconstants used by the current repository."""
1675 1676
1676 1677 narrowed: bool
1677 1678 """True if the manifest is narrowed by a matcher."""
1678 1679
1679 1680 @abc.abstractmethod
1680 1681 def __getitem__(self, node):
1681 1682 """Obtain a manifest instance for a given binary node.
1682 1683
1683 1684 Equivalent to calling ``self.get('', node)``.
1684 1685
1685 1686 The returned object conforms to the ``imanifestrevisionstored``
1686 1687 interface.
1687 1688 """
1688 1689
1689 1690 @abc.abstractmethod
1690 1691 def get(self, tree, node, verify=True):
1691 1692 """Retrieve the manifest instance for a given directory and binary node.
1692 1693
1693 1694 ``node`` always refers to the node of the root manifest (which will be
1694 1695 the only manifest if flat manifests are being used).
1695 1696
1696 1697 If ``tree`` is the empty string, the root manifest is returned.
1697 1698 Otherwise the manifest for the specified directory will be returned
1698 1699 (requires tree manifests).
1699 1700
1700 1701 If ``verify`` is True, ``LookupError`` is raised if the node is not
1701 1702 known.
1702 1703
1703 1704 The returned object conforms to the ``imanifestrevisionstored``
1704 1705 interface.
1705 1706 """
1706 1707
1707 1708 @abc.abstractmethod
1708 1709 def getstorage(self, tree):
1709 1710 """Retrieve an interface to storage for a particular tree.
1710 1711
1711 1712 If ``tree`` is the empty bytestring, storage for the root manifest will
1712 1713 be returned. Otherwise storage for a tree manifest is returned.
1713 1714
1714 1715 TODO formalize interface for returned object.
1715 1716 """
1716 1717
1717 1718 @abc.abstractmethod
1718 1719 def clearcaches(self, clear_persisted_data: bool = False) -> None:
1719 1720 """Clear caches associated with this collection."""
1720 1721
1721 1722 @abc.abstractmethod
1722 1723 def rev(self, node):
1723 1724 """Obtain the revision number for a binary node.
1724 1725
1725 1726 Raises ``error.LookupError`` if the node is not known.
1726 1727 """
1727 1728
1728 1729 @abc.abstractmethod
1729 1730 def update_caches(self, transaction):
1730 1731 """Update whatever caches are relevant for the storage in use."""
1731 1732
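# An illustrative sketch (not part of the upstream interface): reading the root
# manifest and a nested tree manifest through an ``imanifestlog`` provider.
# ``manifestlog`` is assumed to conform to the protocol above, ``node`` to be a
# binary root-manifest node known to it, and ``b'dir/'`` is a hypothetical
# directory (meaningful only with tree manifests).
def _example_read_manifests(manifestlog, node):
    root = manifestlog[node]                  # equivalent to .get(b'', node)
    subtree = manifestlog.get(b'dir/', node)  # per-directory manifest
    storage = manifestlog.getstorage(b'')     # storage for the root manifest
    return root, subtree, storage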
1732 1733
1733 1734 class ilocalrepositoryfilestorage(Protocol):
1734 1735 """Local repository sub-interface providing access to tracked file storage.
1735 1736
1736 1737 This interface defines how a repository accesses storage for a single
1737 1738 tracked file path.
1738 1739 """
1739 1740
1740 1741 @abc.abstractmethod
1741 1742 def file(self, f):
1742 1743 """Obtain a filelog for a tracked path.
1743 1744
1744 1745 The returned type conforms to the ``ifilestorage`` interface.
1745 1746 """
1746 1747
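# An illustrative sketch (not part of the upstream interface): obtaining
# per-file storage for a tracked path.  ``repo`` is assumed to provide this
# sub-interface and the path is hypothetical; the returned object is documented
# above as conforming to ``ifilestorage``.
def _example_file_storage(repo, path=b'README'):
    filelog = repo.file(path)
    # The filelog can then be queried through the ``ifilestorage`` methods
    # described earlier in this module.
    return filelog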
1747 1748
1748 1749 class ilocalrepositorymain(Protocol):
1749 1750 """Main interface for local repositories.
1750 1751
1751 1752 This currently captures the reality of things - not how things should be.
1752 1753 """
1753 1754
1754 1755 nodeconstants: NodeConstants
1755 1756 """Constant nodes matching the hash function used by the repository."""
1756 1757
1757 1758 nullid: bytes
1758 1759 """null revision for the hash function used by the repository."""
1759 1760
1760 1761 supported: set[bytes]
1761 1762 """Set of requirements that this repo is capable of opening."""
1762 1763
1763 1764 requirements: set[bytes]
1764 1765 """Set of requirements this repo uses."""
1765 1766
1766 1767 features: set[bytes]
1767 1768 """Set of "features" this repository supports.
1768 1769
1769 1770 A "feature" is a loosely-defined term. It can refer to a feature
1770 1771 in the classical sense or can describe an implementation detail
1771 1772 of the repository. For example, a ``readonly`` feature may denote
1772 1773 the repository as read-only. Or a ``revlogfilestore`` feature may
1773 1774 denote that the repository is using revlogs for file storage.
1774 1775
1775 1776 The intent of features is to provide a machine-queryable mechanism
1776 1777 for repo consumers to test for various repository characteristics.
1777 1778
1778 1779 Features are similar to ``requirements``. The main difference is that
1779 1780 requirements are stored on-disk and represent requirements to open the
1780 1781 repository. Features are more run-time capabilities of the repository
1781 1782 and more granular capabilities (which may be derived from requirements).
1782 1783 """
1783 1784
1784 1785 filtername: bytes
1785 1786 """Name of the repoview that is active on this repo."""
1786 1787
1787 1788 vfs_map: Mapping[bytes, Vfs]
1788 1789 """a bytes-key → vfs mapping used by transaction and others"""
1789 1790
1790 1791 wvfs: Vfs
1791 1792 """VFS used to access the working directory."""
1792 1793
1793 1794 vfs: Vfs
1794 1795 """VFS rooted at the .hg directory.
1795 1796
1796 1797 Used to access repository data not in the store.
1797 1798 """
1798 1799
1799 1800 svfs: Vfs
1800 1801 """VFS rooted at the store.
1801 1802
1802 1803 Used to access repository data in the store. Typically .hg/store.
1803 1804 But can point elsewhere if the store is shared.
1804 1805 """
1805 1806
1806 1807 root: bytes
1807 1808 """Path to the root of the working directory."""
1808 1809
1809 1810 path: bytes
1810 1811 """Path to the .hg directory."""
1811 1812
1812 1813 origroot: bytes
1813 1814 """The filesystem path that was used to construct the repo."""
1814 1815
1815 1816 auditor: Any
1816 1817 """A pathauditor for the working directory.
1817 1818
1818 1819 This checks if a path refers to a nested repository.
1819 1820
1820 1821 Operates on the filesystem.
1821 1822 """
1822 1823
1823 1824 nofsauditor: Any # TODO: add type hints
1824 1825 """A pathauditor for the working directory.
1825 1826
1826 1827 This is like ``auditor`` except it doesn't do filesystem checks.
1827 1828 """
1828 1829
1829 1830 baseui: Ui
1830 1831 """Original ui instance passed into constructor."""
1831 1832
1832 1833 ui: Ui
1833 1834 """Main ui instance for this repository."""
1834 1835
1835 1836 sharedpath: bytes
1836 1837 """Path to the .hg directory of the repo this repo was shared from."""
1837 1838
1838 1839 store: Any # TODO: add type hints
1839 1840 """A store instance."""
1840 1841
1841 1842 spath: bytes
1842 1843 """Path to the store."""
1843 1844
1844 1845 sjoin: Callable # TODO: add type hints
1845 1846 """Alias to self.store.join."""
1846 1847
1847 1848 cachevfs: Vfs
1848 1849 """A VFS used to access the cache directory.
1849 1850
1850 1851 Typically .hg/cache.
1851 1852 """
1852 1853
1853 1854 wcachevfs: Vfs
1854 1855 """A VFS used to access the cache directory dedicated to the working copy.
1855 1856
1856 1857 Typically .hg/wcache.
1857 1858 """
1858 1859
1859 1860 filteredrevcache: Any # TODO: add type hints
1860 1861 """Holds sets of revisions to be filtered."""
1861 1862
1862 1863 names: Any # TODO: add type hints
1863 1864 """A ``namespaces`` instance."""
1864 1865
1865 1866 filecopiesmode: Any # TODO: add type hints
1866 1867 """The way file copies should be dealt with in this repo."""
1867 1868
1868 1869 @abc.abstractmethod
1869 1870 def close(self):
1870 1871 """Close the handle on this repository."""
1871 1872
1872 1873 @abc.abstractmethod
1873 1874 def peer(self, path=None):
1874 1875 """Obtain an object conforming to the ``peer`` interface."""
1875 1876
1876 1877 @abc.abstractmethod
1877 1878 def unfiltered(self):
1878 1879 """Obtain an unfiltered/raw view of this repo."""
1879 1880
1880 1881 @abc.abstractmethod
1881 1882 def filtered(self, name, visibilityexceptions=None):
1882 1883 """Obtain a named view of this repository."""
1883 1884
1884 1885 obsstore: Any # TODO: add type hints
1885 1886 """A store of obsolescence data."""
1886 1887
1887 1888 changelog: Any # TODO: add type hints
1888 1889 """A handle on the changelog revlog."""
1889 1890
1890 1891 manifestlog: imanifestlog
1891 1892 """An instance conforming to the ``imanifestlog`` interface.
1892 1893
1893 1894 Provides access to manifests for the repository.
1894 1895 """
1895 1896
1896 1897 dirstate: intdirstate.idirstate
1897 1898 """Working directory state."""
1898 1899
1899 1900 narrowpats: Any # TODO: add type hints
1900 1901 """Matcher patterns for this repository's narrowspec."""
1901 1902
1902 1903 @abc.abstractmethod
1903 1904 def narrowmatch(self, match=None, includeexact=False):
1904 1905 """Obtain a matcher for the narrowspec."""
1905 1906
1906 1907 @abc.abstractmethod
1907 1908 def setnarrowpats(self, newincludes, newexcludes):
1908 1909 """Define the narrowspec for this repository."""
1909 1910
1910 1911 @abc.abstractmethod
1911 1912 def __getitem__(self, changeid):
1912 1913 """Try to resolve a changectx."""
1913 1914
1914 1915 @abc.abstractmethod
1915 1916 def __contains__(self, changeid):
1916 1917 """Whether a changeset exists."""
1917 1918
1918 1919 @abc.abstractmethod
1919 1920 def __nonzero__(self):
1920 1921 """Always returns True."""
1921 1922 return True
1922 1923
1923 1924 __bool__ = __nonzero__
1924 1925
1925 1926 @abc.abstractmethod
1926 1927 def __len__(self):
1927 1928 """Returns the number of changesets in the repo."""
1928 1929
1929 1930 @abc.abstractmethod
1930 1931 def __iter__(self):
1931 1932 """Iterate over revisions in the changelog."""
1932 1933
1933 1934 @abc.abstractmethod
1934 1935 def revs(self, expr, *args):
1935 1936 """Evaluate a revset.
1936 1937
1937 1938 Emits revisions.
1938 1939 """
1939 1940
1940 1941 @abc.abstractmethod
1941 1942 def set(self, expr, *args):
1942 1943 """Evaluate a revset.
1943 1944
1944 1945 Emits changectx instances.
1945 1946 """
1946 1947
1947 1948 @abc.abstractmethod
1948 1949 def anyrevs(self, specs, user=False, localalias=None):
1949 1950 """Find revisions matching one of the given revsets."""
1950 1951
1951 1952 @abc.abstractmethod
1952 1953 def url(self):
1953 1954 """Returns a string representing the location of this repo."""
1954 1955
1955 1956 @abc.abstractmethod
1956 1957 def hook(self, name, throw=False, **args):
1957 1958 """Call a hook."""
1958 1959
1959 1960 @abc.abstractmethod
1960 1961 def tags(self):
1961 1962 """Return a mapping of tag to node."""
1962 1963
1963 1964 @abc.abstractmethod
1964 1965 def tagtype(self, tagname):
1965 1966 """Return the type of a given tag."""
1966 1967
1967 1968 @abc.abstractmethod
1968 1969 def tagslist(self):
1969 1970 """Return a list of tags ordered by revision."""
1970 1971
1971 1972 @abc.abstractmethod
1972 1973 def nodetags(self, node):
1973 1974 """Return the tags associated with a node."""
1974 1975
1975 1976 @abc.abstractmethod
1976 1977 def nodebookmarks(self, node):
1977 1978 """Return the list of bookmarks pointing to the specified node."""
1978 1979
1979 1980 @abc.abstractmethod
1980 1981 def branchmap(self):
1981 1982 """Return a mapping of branch to heads in that branch."""
1982 1983
1983 1984 @abc.abstractmethod
1984 1985 def revbranchcache(self):
1985 1986 pass
1986 1987
1987 1988 @abc.abstractmethod
1988 1989 def register_changeset(self, rev, changelogrevision):
1989 1990 """Extension point for caches for new nodes.
1990 1991
1991 1992 Multiple consumers are expected to need parts of the changelogrevision,
1992 1993 so it is provided as an optimization to avoid duplicate lookups. A simple
1993 1994 cache would be fragile when other revisions are accessed, too."""
1994 1995 pass
1995 1996
1996 1997 @abc.abstractmethod
1997 1998 def branchtip(self, branchtip, ignoremissing=False):
1998 1999 """Return the tip node for a given branch."""
1999 2000
2000 2001 @abc.abstractmethod
2001 2002 def lookup(self, key):
2002 2003 """Resolve the node for a revision."""
2003 2004
2004 2005 @abc.abstractmethod
2005 2006 def lookupbranch(self, key):
2006 2007 """Look up the branch name of the given revision or branch name."""
2007 2008
2008 2009 @abc.abstractmethod
2009 2010 def known(self, nodes):
2010 2011 """Determine whether a series of nodes is known.
2011 2012
2012 2013 Returns a list of bools.
2013 2014 """
2014 2015
2015 2016 @abc.abstractmethod
2016 2017 def local(self):
2017 2018 """Whether the repository is local."""
2018 2019 return True
2019 2020
2020 2021 @abc.abstractmethod
2021 2022 def publishing(self):
2022 2023 """Whether the repository is a publishing repository."""
2023 2024
2024 2025 @abc.abstractmethod
2025 2026 def cancopy(self):
2026 2027 pass
2027 2028
2028 2029 @abc.abstractmethod
2029 2030 def shared(self):
2030 2031 """The type of shared repository or None."""
2031 2032
2032 2033 @abc.abstractmethod
2033 2034 def wjoin(self, f, *insidef):
2034 2035 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
2035 2036
2036 2037 @abc.abstractmethod
2037 2038 def setparents(self, p1, p2):
2038 2039 """Set the parent nodes of the working directory."""
2039 2040
2040 2041 @abc.abstractmethod
2041 2042 def filectx(self, path, changeid=None, fileid=None):
2042 2043 """Obtain a filectx for the given file revision."""
2043 2044
2044 2045 @abc.abstractmethod
2045 2046 def getcwd(self):
2046 2047 """Obtain the current working directory from the dirstate."""
2047 2048
2048 2049 @abc.abstractmethod
2049 2050 def pathto(self, f, cwd=None):
2050 2051 """Obtain the relative path to a file."""
2051 2052
2052 2053 @abc.abstractmethod
2053 2054 def adddatafilter(self, name, fltr):
2054 2055 pass
2055 2056
2056 2057 @abc.abstractmethod
2057 2058 def wread(self, filename):
2058 2059 """Read a file from wvfs, using data filters."""
2059 2060
2060 2061 @abc.abstractmethod
2061 2062 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2062 2063 """Write data to a file in the wvfs, using data filters."""
2063 2064
2064 2065 @abc.abstractmethod
2065 2066 def wwritedata(self, filename, data):
2066 2067 """Resolve data for writing to the wvfs, using data filters."""
2067 2068
2068 2069 @abc.abstractmethod
2069 2070 def currenttransaction(self):
2070 2071 """Obtain the current transaction instance or None."""
2071 2072
2072 2073 @abc.abstractmethod
2073 2074 def transaction(self, desc, report=None):
2074 2075 """Open a new transaction to write to the repository."""
2075 2076
2076 2077 @abc.abstractmethod
2077 2078 def undofiles(self):
2078 2079 """Returns a list of (vfs, path) for files to undo transactions."""
2079 2080
2080 2081 @abc.abstractmethod
2081 2082 def recover(self):
2082 2083 """Roll back an interrupted transaction."""
2083 2084
2084 2085 @abc.abstractmethod
2085 2086 def rollback(self, dryrun=False, force=False):
2086 2087 """Undo the last transaction.
2087 2088
2088 2089 DANGEROUS.
2089 2090 """
2090 2091
2091 2092 @abc.abstractmethod
2092 2093 def updatecaches(self, tr=None, full=False, caches=None):
2093 2094 """Warm repo caches."""
2094 2095
2095 2096 @abc.abstractmethod
2096 2097 def invalidatecaches(self):
2097 2098 """Invalidate cached data due to the repository mutating."""
2098 2099
2099 2100 @abc.abstractmethod
2100 2101 def invalidatevolatilesets(self):
2101 2102 pass
2102 2103
2103 2104 @abc.abstractmethod
2104 2105 def invalidatedirstate(self):
2105 2106 """Invalidate the dirstate."""
2106 2107
2107 2108 @abc.abstractmethod
2108 2109 def invalidate(self, clearfilecache=False):
2109 2110 pass
2110 2111
2111 2112 @abc.abstractmethod
2112 2113 def invalidateall(self):
2113 2114 pass
2114 2115
2115 2116 @abc.abstractmethod
2116 2117 def lock(self, wait=True):
2117 2118 """Lock the repository store and return a lock instance."""
2118 2119
2119 2120 @abc.abstractmethod
2120 2121 def currentlock(self):
2121 2122 """Return the lock if it's held or None."""
2122 2123
2123 2124 @abc.abstractmethod
2124 2125 def wlock(self, wait=True):
2125 2126 """Lock the non-store parts of the repository."""
2126 2127
2127 2128 @abc.abstractmethod
2128 2129 def currentwlock(self):
2129 2130 """Return the wlock if it's held or None."""
2130 2131
2131 2132 @abc.abstractmethod
2132 2133 def checkcommitpatterns(self, wctx, match, status, fail):
2133 2134 pass
2134 2135
2135 2136 @abc.abstractmethod
2136 2137 def commit(
2137 2138 self,
2138 2139 text=b'',
2139 2140 user=None,
2140 2141 date=None,
2141 2142 match=None,
2142 2143 force=False,
2143 2144 editor=False,
2144 2145 extra=None,
2145 2146 ):
2146 2147 """Add a new revision to the repository."""
2147 2148
2148 2149 @abc.abstractmethod
2149 2150 def commitctx(self, ctx, error=False, origctx=None):
2150 2151 """Commit a commitctx instance to the repository."""
2151 2152
2152 2153 @abc.abstractmethod
2153 2154 def destroying(self):
2154 2155 """Inform the repository that nodes are about to be destroyed."""
2155 2156
2156 2157 @abc.abstractmethod
2157 2158 def destroyed(self):
2158 2159 """Inform the repository that nodes have been destroyed."""
2159 2160
2160 2161 @abc.abstractmethod
2161 2162 def status(
2162 2163 self,
2163 2164 node1=b'.',
2164 2165 node2=None,
2165 2166 match=None,
2166 2167 ignored=False,
2167 2168 clean=False,
2168 2169 unknown=False,
2169 2170 listsubrepos=False,
2170 2171 ):
2171 2172 """Convenience method to call repo[x].status()."""
2172 2173
2173 2174 @abc.abstractmethod
2174 2175 def addpostdsstatus(self, ps):
2175 2176 pass
2176 2177
2177 2178 @abc.abstractmethod
2178 2179 def postdsstatus(self):
2179 2180 pass
2180 2181
2181 2182 @abc.abstractmethod
2182 2183 def clearpostdsstatus(self):
2183 2184 pass
2184 2185
2185 2186 @abc.abstractmethod
2186 2187 def heads(self, start=None):
2187 2188 """Obtain list of nodes that are DAG heads."""
2188 2189
2189 2190 @abc.abstractmethod
2190 2191 def branchheads(self, branch=None, start=None, closed=False):
2191 2192 pass
2192 2193
2193 2194 @abc.abstractmethod
2194 2195 def branches(self, nodes):
2195 2196 pass
2196 2197
2197 2198 @abc.abstractmethod
2198 2199 def between(self, pairs):
2199 2200 pass
2200 2201
2201 2202 @abc.abstractmethod
2202 2203 def checkpush(self, pushop):
2203 2204 pass
2204 2205
2205 2206 prepushoutgoinghooks: util.hooks
2206 2207 """util.hooks instance."""
2207 2208
2208 2209 @abc.abstractmethod
2209 2210 def pushkey(self, namespace, key, old, new):
2210 2211 pass
2211 2212
2212 2213 @abc.abstractmethod
2213 2214 def listkeys(self, namespace):
2214 2215 pass
2215 2216
2216 2217 @abc.abstractmethod
2217 2218 def debugwireargs(self, one, two, three=None, four=None, five=None):
2218 2219 pass
2219 2220
2220 2221 @abc.abstractmethod
2221 2222 def savecommitmessage(self, text):
2222 2223 pass
2223 2224
2224 2225 @abc.abstractmethod
2225 2226 def register_sidedata_computer(
2226 2227 self, kind, category, keys, computer, flags, replace=False
2227 2228 ):
2228 2229 pass
2229 2230
2230 2231 @abc.abstractmethod
2231 2232 def register_wanted_sidedata(self, category):
2232 2233 pass
2233 2234
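# An illustrative sketch (not part of the upstream interface): a read-only tour
# of a few of the ``ilocalrepositorymain`` methods documented above.  ``repo``
# is assumed to conform to that interface and ``b'draft()'`` is merely an
# example revset expression.
def _example_repo_summary(repo):
    return {
        b'changesets': len(repo),               # number of changesets
        b'heads': repo.heads(),                 # DAG head nodes
        b'tags': sorted(repo.tags()),           # known tag names
        b'draft': list(repo.revs(b'draft()')),  # revset evaluation
    }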
2234 2235
2235 2236 class completelocalrepository(
2236 2237 ilocalrepositorymain,
2237 2238 ilocalrepositoryfilestorage,
2238 2239 Protocol,
2239 2240 ):
2240 2241 """Complete interface for a local repository."""
2241 2242
2242 2243
2243 2244 class iwireprotocolcommandcacher(Protocol):
2244 2245 """Represents a caching backend for wire protocol commands.
2245 2246
2246 2247 Wire protocol version 2 supports transparent caching of many commands.
2247 2248 To leverage this caching, servers can activate objects that cache
2248 2249 command responses. Objects handle both cache writing and reading.
2249 2250 This interface defines how that response caching mechanism works.
2250 2251
2251 2252 Wire protocol version 2 commands emit a series of objects that are
2252 2253 serialized and sent to the client. The caching layer exists between
2253 2254 the invocation of the command function and the sending of its output
2254 2255 objects to an output layer.
2255 2256
2256 2257 Instances of this interface represent a binding to a cache that
2257 2258 can serve a response (in place of calling a command function) and/or
2258 2259 write responses to a cache for subsequent use.
2259 2260
2260 2261 When a command request arrives, the following happens with regards
2261 2262 to this interface:
2262 2263
2263 2264 1. The server determines whether the command request is cacheable.
2264 2265 2. If it is, an instance of this interface is spawned.
2265 2266 3. The cacher is activated in a context manager (``__enter__`` is called).
2266 2267 4. A cache *key* for that request is derived. This will call the
2267 2268 instance's ``adjustcachekeystate()`` method so the derivation
2268 2269 can be influenced.
2269 2270 5. The cacher is informed of the derived cache key via a call to
2270 2271 ``setcachekey()``.
2271 2272 6. The cacher's ``lookup()`` method is called to test for presence of
2272 2273 the derived key in the cache.
2273 2274 7. If ``lookup()`` returns a hit, that cached result is used in place
2274 2275 of invoking the command function. ``__exit__`` is called and the instance
2275 2276 is discarded.
2276 2277 8. The command function is invoked.
2277 2278 9. ``onobject()`` is called for each object emitted by the command
2278 2279 function.
2279 2280 10. After the final object is seen, ``onfinished()`` is called.
2280 2281 11. ``__exit__`` is called to signal the end of use of the instance.
2281 2282
2282 2283 Cache *key* derivation can be influenced by the instance.
2283 2284
2284 2285 Cache keys are initially derived by a deterministic representation of
2285 2286 the command request. This includes the command name, arguments, protocol
2286 2287 version, etc. This initial key derivation is performed by CBOR-encoding a
2287 2288 data structure and feeding that output into a hasher.
2288 2289
2289 2290 Instances of this interface can influence this initial key derivation
2290 2291 via ``adjustcachekeystate()``.
2291 2292
2292 2293 The instance is informed of the derived cache key via a call to
2293 2294 ``setcachekey()``. The instance must store the key locally so it can
2294 2295 be consulted on subsequent operations that may require it.
2295 2296
2296 2297 When constructed, the instance has access to a callable that can be used
2297 2298 for encoding response objects. This callable receives as its single
2298 2299 argument an object emitted by a command function. It returns an iterable
2299 2300 of bytes chunks representing the encoded object. Unless the cacher is
2300 2301 caching native Python objects in memory or has a way of reconstructing
2301 2302 the original Python objects, implementations typically call this function
2302 2303 to produce bytes from the output objects and then store those bytes in
2303 2304 the cache. When it comes time to re-emit those bytes, they are wrapped
2304 2305 in a ``wireprototypes.encodedresponse`` instance to tell the output
2305 2306 layer that they are pre-encoded.
2306 2307
2307 2308 When receiving the objects emitted by the command function, instances
2308 2309 can choose what to do with those objects. The simplest thing to do is
2309 2310 re-emit the original objects. They will be forwarded to the output
2310 2311 layer and will be processed as if the cacher did not exist.
2311 2312
2312 2313 Implementations could also choose to not emit objects - instead locally
2313 2314 buffering objects or their encoded representation. They could then emit
2314 2315 a single "coalesced" object when ``onfinished()`` is called. In
2315 2316 this way, the implementation would function as a filtering layer of
2316 2317 sorts.
2317 2318
2318 2319 When caching objects, typically the encoded form of the object will
2319 2320 be stored. Keep in mind that if the original object is forwarded to
2320 2321 the output layer, it will need to be encoded there as well. For large
2321 2322 output, this redundant encoding could add overhead. Implementations
2322 2323 could wrap the encoded object data in ``wireprototypes.encodedresponse``
2323 2324 instances to avoid this overhead.
2324 2325 """
2325 2326
2326 2327 @abc.abstractmethod
2327 2328 def __enter__(self):
2328 2329 """Marks the instance as active.
2329 2330
2330 2331 Should return self.
2331 2332 """
2332 2333
2333 2334 @abc.abstractmethod
2334 2335 def __exit__(self, exctype, excvalue, exctb):
2335 2336 """Called when cacher is no longer used.
2336 2337
2337 2338 This can be used by implementations to perform cleanup actions (e.g.
2338 2339 disconnecting network sockets, aborting a partially cached response).
2339 2340 """
2340 2341
2341 2342 @abc.abstractmethod
2342 2343 def adjustcachekeystate(self, state):
2343 2344 """Influences cache key derivation by adjusting state to derive key.
2344 2345
2345 2346 A dict defining the state used to derive the cache key is passed.
2346 2347
2347 2348 Implementations can modify this dict to record additional state that
2348 2349 is wanted to influence key derivation.
2349 2350
2350 2351 Implementations are *highly* encouraged to not modify or delete
2351 2352 existing keys.
2352 2353 """
2353 2354
2354 2355 @abc.abstractmethod
2355 2356 def setcachekey(self, key):
2356 2357 """Record the derived cache key for this request.
2357 2358
2358 2359 Instances may mutate the key for internal usage, as desired. e.g.
2359 2360 instances may wish to prepend the repo name, introduce path
2360 2361 components for filesystem or URL addressing, etc. Behavior is up to
2361 2362 the cache.
2362 2363
2363 2364 Returns a bool indicating if the request is cacheable by this
2364 2365 instance.
2365 2366 """
2366 2367
2367 2368 @abc.abstractmethod
2368 2369 def lookup(self):
2369 2370 """Attempt to resolve an entry in the cache.
2370 2371
2371 2372 The instance is instructed to look for the cache key that it was
2372 2373 informed about via the call to ``setcachekey()``.
2373 2374
2374 2375 If there's no cache hit or the cacher doesn't wish to use the cached
2375 2376 entry, ``None`` should be returned.
2376 2377
2377 2378 Else, a dict defining the cached result should be returned. The
2378 2379 dict may have the following keys:
2379 2380
2380 2381 objs
2381 2382 An iterable of objects that should be sent to the client. That
2382 2383 iterable of objects is expected to be what the command function
2383 2384 would return if invoked or an equivalent representation thereof.
2384 2385 """
2385 2386
2386 2387 @abc.abstractmethod
2387 2388 def onobject(self, obj):
2388 2389 """Called when a new object is emitted from the command function.
2389 2390
2390 2391 Receives as its argument the object that was emitted from the
2391 2392 command function.
2392 2393
2393 2394 This method returns an iterator of objects to forward to the output
2394 2395 layer. The easiest implementation is a generator that just
2395 2396 ``yield obj``.
2396 2397 """
2397 2398
2398 2399 @abc.abstractmethod
2399 2400 def onfinished(self):
2400 2401 """Called after all objects have been emitted from the command function.
2401 2402
2402 2403 Implementations should return an iterator of objects to forward to
2403 2404 the output layer.
2404 2405
2405 2406 This method can be a generator.
2406 2407 """
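
# An illustrative sketch (not part of the upstream interface): a minimal
# in-memory cacher following the call sequence documented above.  It buffers
# the objects emitted by the command function and replays them for the next
# request that derives the same cache key.  A real implementation would
# normally store the *encoded* byte chunks (wrapping them in
# ``wireprototypes.encodedresponse`` on replay) rather than raw Python objects;
# plain objects are kept here only to keep the sketch short.
class _examplememorycacher:
    _cache = {}  # shared across instances, keyed by the derived cache key

    def __init__(self):
        self._key = None
        self._buffered = []

    def __enter__(self):
        return self  # step 3: activation as a context manager

    def __exit__(self, exctype, excvalue, exctb):
        self._buffered = []  # step 11: release any per-request state

    def adjustcachekeystate(self, state):
        pass  # step 4: no extra state is mixed into key derivation here

    def setcachekey(self, key):
        self._key = key  # step 5: remember the derived key
        return True  # every request is considered cacheable in this sketch

    def lookup(self):
        # Steps 6-7: serve the buffered objects if the key was seen before.
        objs = self._cache.get(self._key)
        if objs is None:
            return None
        return {'objs': iter(objs)}

    def onobject(self, obj):
        # Step 9: remember the object and forward it to the output layer.
        self._buffered.append(obj)
        yield obj

    def onfinished(self):
        # Step 10: persist the buffered response; nothing extra to emit.
        self._cache[self._key] = list(self._buffered)
        return iter([])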