interfaces: convert `repository.ifilestorage` to a Protocol class...
Matt Harbison
r53385:8c89e978 default
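
The change converts these classes from plain base classes into ``typing.Protocol`` classes whose members are marked with ``abc.abstractmethod``. As a rough, self-contained sketch of what that pattern provides (structural typing for type checkers, plus abstract-method enforcement for explicit subclasses), consider the toy example below; the names are stand-ins, not Mercurial code:

    import abc
    from typing import Iterator, Protocol


    class ifileindex_sketch(Protocol):
        """Toy stand-in mirroring the shape of the protocols in the diff."""

        nullid: bytes

        @abc.abstractmethod
        def __len__(self) -> int:
            """Obtain the number of revisions stored for this file."""

        @abc.abstractmethod
        def __iter__(self) -> Iterator[int]:
            """Iterate over revision numbers for this file."""


    class dummyindex:
        """Conforms structurally; no inheritance from the protocol is needed."""

        nullid = b'\x00' * 20

        def __len__(self) -> int:
            return 0

        def __iter__(self) -> Iterator[int]:
            return iter(())


    def revision_count(index: ifileindex_sketch) -> int:
        # a static type checker accepts dummyindex here by structure alone
        return len(index)


    print(revision_count(dummyindex()))  # -> 0
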
@@ -1,2286 +1,2317 @@
1 1 # repository.py - Interfaces and base classes for repositories and peers.
2 2 # coding: utf-8
3 3 #
4 4 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import annotations
10 10
11 11 import abc
12 12 import typing
13 13
14 14 from typing import (
15 15 Any,
16 16 Callable,
17 17 Collection,
18 18 Iterable,
19 19 Iterator,
20 20 Mapping,
21 21 Protocol,
22 22 )
23 23
24 24 from ..i18n import _
25 25 from .. import error
26 26
27 27 if typing.TYPE_CHECKING:
28 28 # Almost all mercurial modules are only imported in the type checking phase
29 29 # to avoid circular imports
30 30 from .. import (
31 31 util,
32 32 )
33 33 from ..utils import (
34 34 urlutil,
35 35 )
36 36
37 37 from . import dirstate as intdirstate
38 38
39 39 # TODO: make a protocol class for this
40 40 NodeConstants = Any
41 41
42 42 # TODO: create a Protocol class, since importing uimod here causes a cycle
43 43 # that confuses pytype.
44 44 Ui = Any
45 45
46 46 # TODO: make a protocol class for this
47 47 Vfs = Any
48 48
49 49 # Local repository feature string.
50 50
51 51 # Revlogs are being used for file storage.
52 52 REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
53 53 # The storage part of the repository is shared from an external source.
54 54 REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
55 55 # LFS supported for backing file storage.
56 56 REPO_FEATURE_LFS = b'lfs'
57 57 # Repository supports being stream cloned.
58 58 REPO_FEATURE_STREAM_CLONE = b'streamclone'
59 59 # Repository supports (at least) some sidedata to be stored
60 60 REPO_FEATURE_SIDE_DATA = b'side-data'
61 61 # Files storage may lack data for all ancestors.
62 62 REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'
63 63
64 64 REVISION_FLAG_CENSORED = 1 << 15
65 65 REVISION_FLAG_ELLIPSIS = 1 << 14
66 66 REVISION_FLAG_EXTSTORED = 1 << 13
67 67 REVISION_FLAG_HASCOPIESINFO = 1 << 12
68 68
69 69 REVISION_FLAGS_KNOWN = (
70 70 REVISION_FLAG_CENSORED
71 71 | REVISION_FLAG_ELLIPSIS
72 72 | REVISION_FLAG_EXTSTORED
73 73 | REVISION_FLAG_HASCOPIESINFO
74 74 )
75 75
76 76 CG_DELTAMODE_STD = b'default'
77 77 CG_DELTAMODE_PREV = b'previous'
78 78 CG_DELTAMODE_FULL = b'fulltext'
79 79 CG_DELTAMODE_P1 = b'p1'
80 80
81 81
82 82 ## Cache related constants:
83 83 #
84 84 # Used to control which cache should be warmed in a repo.updatecaches(…) call.
85 85
86 86 # Warm branchmaps of all known repoview's filter-level
87 87 CACHE_BRANCHMAP_ALL = b"branchmap-all"
88 88 # Warm branchmaps of repoview's filter-level used by server
89 89 CACHE_BRANCHMAP_SERVED = b"branchmap-served"
90 90 # Warm internal changelog cache (eg: persistent nodemap)
91 91 CACHE_CHANGELOG_CACHE = b"changelog-cache"
92 92 # check if a branchmap can use the "pure topo" mode
93 93 CACHE_BRANCHMAP_DETECT_PURE_TOPO = b"branchmap-detect-pure-topo"
94 94 # Warm full manifest cache
95 95 CACHE_FULL_MANIFEST = b"full-manifest"
96 96 # Warm file-node-tags cache
97 97 CACHE_FILE_NODE_TAGS = b"file-node-tags"
98 98 # Warm internal manifestlog cache (eg: persistent nodemap)
99 99 CACHE_MANIFESTLOG_CACHE = b"manifestlog-cache"
100 100 # Warm rev branch cache
101 101 CACHE_REV_BRANCH = b"rev-branch-cache"
102 102 # Warm tags' cache for default repoview
103 103 CACHE_TAGS_DEFAULT = b"tags-default"
104 104 # Warm tags' cache for repoview's filter-level used by server
105 105 CACHE_TAGS_SERVED = b"tags-served"
106 106
107 107 # the cache to warm by default after a simple transaction
108 108 # (this is a mutable set to let extensions update it)
109 109 CACHES_DEFAULT = {
110 110 CACHE_BRANCHMAP_SERVED,
111 111 }
112 112
113 113 # the caches to warm when warming all of them
114 114 # (this is a mutable set to let extensions update it)
115 115 CACHES_ALL = {
116 116 CACHE_BRANCHMAP_SERVED,
117 117 CACHE_BRANCHMAP_ALL,
118 118 CACHE_BRANCHMAP_DETECT_PURE_TOPO,
119 119 CACHE_REV_BRANCH,
120 120 CACHE_CHANGELOG_CACHE,
121 121 CACHE_FILE_NODE_TAGS,
122 122 CACHE_FULL_MANIFEST,
123 123 CACHE_MANIFESTLOG_CACHE,
124 124 CACHE_TAGS_DEFAULT,
125 125 CACHE_TAGS_SERVED,
126 126 }
127 127
128 128 # the caches to warm by default after a clone
129 129 # (this is a mutable set to let extensions update it)
130 130 CACHES_POST_CLONE = CACHES_ALL.copy()
131 131 CACHES_POST_CLONE.discard(CACHE_FILE_NODE_TAGS)
132 132 CACHES_POST_CLONE.discard(CACHE_REV_BRANCH)
133 133
134 134
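
The comments above describe these sets as mutable precisely so that extensions can update them. A minimal sketch of what such an update could look like, using local stand-ins for the sets defined above and a hypothetical extension cache key:

    # Sketch only: how an extension might add its own key to the mutable cache
    # sets.  CACHE_MY_EXT is hypothetical; real code would import the sets from
    # mercurial.interfaces.repository rather than redefining them here.
    CACHES_DEFAULT = {b"branchmap-served"}                # stand-ins for the
    CACHES_ALL = {b"branchmap-served", b"tags-served"}    # sets defined above

    CACHE_MY_EXT = b"my-extension-cache"                  # hypothetical cache key

    CACHES_DEFAULT.add(CACHE_MY_EXT)   # warmed after a simple transaction
    CACHES_ALL.add(CACHE_MY_EXT)       # warmed when warming all caches

    assert CACHE_MY_EXT in CACHES_DEFAULT and CACHE_MY_EXT in CACHES_ALL
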
135 135 class _ipeerconnection(Protocol):
136 136 """Represents a "connection" to a repository.
137 137
138 138 This is the base interface for representing a connection to a repository.
139 139 It holds basic properties and methods applicable to all peer types.
140 140
141 141 This is not a complete interface definition and should not be used
142 142 outside of this module.
143 143 """
144 144
145 145 ui: Ui
146 146 """ui.ui instance"""
147 147
148 148 path: urlutil.path | None
149 149 """a urlutil.path instance or None"""
150 150
151 151 def url(self):
152 152 """Returns a URL string representing this peer.
153 153
154 154 Currently, implementations expose the raw URL used to construct the
155 155 instance. It may contain credentials as part of the URL. The
156 156 expectations of the value aren't well-defined and this could lead to
157 157 data leakage.
158 158
159 159 TODO audit/clean consumers and more clearly define the contents of this
160 160 value.
161 161 """
162 162
163 163 def local(self):
164 164 """Returns a local repository instance.
165 165
166 166 If the peer represents a local repository, returns an object that
167 167 can be used to interface with it. Otherwise returns ``None``.
168 168 """
169 169
170 170 def canpush(self):
171 171 """Returns a boolean indicating if this peer can be pushed to."""
172 172
173 173 def close(self):
174 174 """Close the connection to this peer.
175 175
176 176 This is called when the peer will no longer be used. Resources
177 177 associated with the peer should be cleaned up.
178 178 """
179 179
180 180
181 181 class ipeercapabilities(Protocol):
182 182 """Peer sub-interface related to capabilities."""
183 183
184 184 def capable(self, name):
185 185 """Determine support for a named capability.
186 186
187 187 Returns ``False`` if capability not supported.
188 188
189 189 Returns ``True`` if boolean capability is supported. Returns a string
190 190 if capability support is non-boolean.
191 191
192 192 Capability strings may or may not map to wire protocol capabilities.
193 193 """
194 194
195 195 def requirecap(self, name, purpose):
196 196 """Require a capability to be present.
197 197
198 198 Raises a ``CapabilityError`` if the capability isn't present.
199 199 """
200 200
201 201
202 202 class ipeercommands(Protocol):
203 203 """Client-side interface for communicating over the wire protocol.
204 204
205 205 This interface is used as a gateway to the Mercurial wire protocol.
206 206 Its methods commonly call wire protocol commands of the same name.
207 207 """
208 208
209 209 def branchmap(self):
210 210 """Obtain heads in named branches.
211 211
212 212 Returns a dict mapping branch name to an iterable of nodes that are
213 213 heads on that branch.
214 214 """
215 215
216 216 def capabilities(self):
217 217 """Obtain capabilities of the peer.
218 218
219 219 Returns a set of string capabilities.
220 220 """
221 221
222 222 def get_cached_bundle_inline(self, path):
223 223 """Retrieve a clonebundle across the wire.
224 224
225 225 Returns a chunkbuffer
226 226 """
227 227
228 228 def clonebundles(self):
229 229 """Obtains the clone bundles manifest for the repo.
230 230
231 231 Returns the manifest as unparsed bytes.
232 232 """
233 233
234 234 def debugwireargs(self, one, two, three=None, four=None, five=None):
235 235 """Used to facilitate debugging of arguments passed over the wire."""
236 236
237 237 def getbundle(self, source, **kwargs):
238 238 """Obtain remote repository data as a bundle.
239 239
240 240 This command is how the bulk of repository data is transferred from
241 241 the peer to the local repository
242 242
243 243 Returns a generator of bundle data.
244 244 """
245 245
246 246 def heads(self):
247 247 """Determine all known head revisions in the peer.
248 248
249 249 Returns an iterable of binary nodes.
250 250 """
251 251
252 252 def known(self, nodes):
253 253 """Determine whether multiple nodes are known.
254 254
255 255 Accepts an iterable of nodes whose presence to check for.
256 256
257 257 Returns an iterable of booleans indicating whether the corresponding node
258 258 at that index is known to the peer.
259 259 """
260 260
261 261 def listkeys(self, namespace):
262 262 """Obtain all keys in a pushkey namespace.
263 263
264 264 Returns an iterable of key names.
265 265 """
266 266
267 267 def lookup(self, key):
268 268 """Resolve a value to a known revision.
269 269
270 270 Returns a binary node of the resolved revision on success.
271 271 """
272 272
273 273 def pushkey(self, namespace, key, old, new):
274 274 """Set a value using the ``pushkey`` protocol.
275 275
276 276 Arguments correspond to the pushkey namespace and key to operate on and
277 277 the old and new values for that key.
278 278
279 279 Returns a string with the peer result. The value inside varies by the
280 280 namespace.
281 281 """
282 282
283 283 def stream_out(self):
284 284 """Obtain streaming clone data.
285 285
286 286 Successful result should be a generator of data chunks.
287 287 """
288 288
289 289 def unbundle(self, bundle, heads, url):
290 290 """Transfer repository data to the peer.
291 291
292 292 This is how the bulk of data during a push is transferred.
293 293
294 294 Returns the integer number of heads added to the peer.
295 295 """
296 296
297 297
298 298 class ipeerlegacycommands(Protocol):
299 299 """Interface for implementing support for legacy wire protocol commands.
300 300
301 301 Wire protocol commands transition to legacy status when they are no longer
302 302 used by modern clients. To facilitate identifying which commands are
303 303 legacy, the interfaces are split.
304 304 """
305 305
306 306 def between(self, pairs):
307 307 """Obtain nodes between pairs of nodes.
308 308
309 309 ``pairs`` is an iterable of node pairs.
310 310
311 311 Returns an iterable of iterables of nodes corresponding to each
312 312 requested pair.
313 313 """
314 314
315 315 def branches(self, nodes):
316 316 """Obtain ancestor changesets of specific nodes back to a branch point.
317 317
318 318 For each requested node, the peer finds the first ancestor node that is
319 319 a DAG root or is a merge.
320 320
321 321 Returns an iterable of iterables with the resolved values for each node.
322 322 """
323 323
324 324 def changegroup(self, nodes, source):
325 325 """Obtain a changegroup with data for descendants of specified nodes."""
326 326
327 327 def changegroupsubset(self, bases, heads, source):
328 328 pass
329 329
330 330
331 331 class ipeercommandexecutor(Protocol):
332 332 """Represents a mechanism to execute remote commands.
333 333
334 334 This is the primary interface for requesting that wire protocol commands
335 335 be executed. Instances of this interface are active in a context manager
336 336 and have a well-defined lifetime. When the context manager exits, all
337 337 outstanding requests are waited on.
338 338 """
339 339
340 340 def callcommand(self, name, args):
341 341 """Request that a named command be executed.
342 342
343 343 Receives the command name and a dictionary of command arguments.
344 344
345 345 Returns a ``concurrent.futures.Future`` that will resolve to the
346 346 result of that command request. That exact value is left up to
347 347 the implementation and possibly varies by command.
348 348
349 349 Not all commands can coexist with other commands in an executor
350 350 instance: it depends on the underlying wire protocol transport being
351 351 used and the command itself.
352 352
353 353 Implementations MAY call ``sendcommands()`` automatically if the
354 354 requested command can not coexist with other commands in this executor.
355 355
356 356 Implementations MAY call ``sendcommands()`` automatically when the
357 357 future's ``result()`` is called. So, consumers using multiple
358 358 commands with an executor MUST ensure that ``result()`` is not called
359 359 until all command requests have been issued.
360 360 """
361 361
362 362 def sendcommands(self):
363 363 """Trigger submission of queued command requests.
364 364
365 365 Not all transports submit commands as soon as they are requested to
366 366 run. When called, this method forces queued command requests to be
367 367 issued. It will no-op if all commands have already been sent.
368 368
369 369 When called, no more new commands may be issued with this executor.
370 370 """
371 371
372 372 def close(self):
373 373 """Signal that this command request is finished.
374 374
375 375 When called, no more new commands may be issued. All outstanding
376 376 commands that have previously been issued are waited on before
377 377 returning. This not only includes waiting for the futures to resolve,
378 378 but also waiting for all response data to arrive. In other words,
379 379 calling this waits for all on-wire state for issued command requests
380 380 to finish.
381 381
382 382 When used as a context manager, this method is called when exiting the
383 383 context manager.
384 384
385 385 This method may call ``sendcommands()`` if there are buffered commands.
386 386 """
387 387
388 388
389 389 class ipeerrequests(Protocol):
390 390 """Interface for executing commands on a peer."""
391 391
392 392 limitedarguments: bool
393 393 """True if the peer cannot receive large argument values for commands."""
394 394
395 395 def commandexecutor(self):
396 396 """A context manager that resolves to an ipeercommandexecutor.
397 397
398 398 The object this resolves to can be used to issue command requests
399 399 to the peer.
400 400
401 401 Callers should call its ``callcommand`` method to issue command
402 402 requests.
403 403
404 404 A new executor should be obtained for each distinct set of commands
405 405 (possibly just a single command) that the consumer wants to execute
406 406 as part of a single operation or round trip. This is because some
407 407 peers are half-duplex and/or don't support persistent connections.
408 408 e.g. in the case of HTTP peers, commands sent to an executor represent
409 409 a single HTTP request. While some peers may support multiple command
410 410 sends over the wire per executor, consumers need to code to the least
411 411 capable peer. So it should be assumed that command executors buffer
412 412 called commands until they are told to send them and that each
413 413 command executor could result in a new connection or wire-level request
414 414 being issued.
415 415 """
416 416
417 417
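
The executor contract described above is easiest to see in use. Below is a hedged sketch of a caller issuing two commands in a single round trip; ``some_peer`` and the command names are assumptions, while the ordering rule (queue every ``callcommand()`` before resolving any ``result()``) restates the docstrings above:

    def heads_and_branchmap(some_peer):
        # one executor per logical round trip, as recommended above
        with some_peer.commandexecutor() as e:
            f_heads = e.callcommand(b'heads', {})
            f_branchmap = e.callcommand(b'branchmap', {})
            # resolve the futures only after both commands were queued;
            # resolving earlier may force sendcommands() prematurely
            heads = f_heads.result()
            branchmap = f_branchmap.result()
        return heads, branchmap
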
418 418 class peer(_ipeerconnection, ipeercapabilities, ipeerrequests, Protocol):
419 419 """Unified interface for peer repositories.
420 420
421 421 All peer instances must conform to this interface.
422 422 """
423 423
424 424 limitedarguments: bool = False
425 425
426 426 def __init__(self, ui, path=None, remotehidden=False):
427 427 self.ui = ui
428 428 self.path = path
429 429
430 430 def capable(self, name):
431 431 # TODO: this class should maybe subclass ipeercommands too, otherwise it
432 432 # is assuming whatever uses this as a mixin also has this interface.
433 433 caps = self.capabilities() # pytype: disable=attribute-error
434 434 if name in caps:
435 435 return True
436 436
437 437 name = b'%s=' % name
438 438 for cap in caps:
439 439 if cap.startswith(name):
440 440 return cap[len(name) :]
441 441
442 442 return False
443 443
444 444 def requirecap(self, name, purpose):
445 445 if self.capable(name):
446 446 return
447 447
448 448 raise error.CapabilityError(
449 449 _(
450 450 b'cannot %s; remote repository does not support the '
451 451 b'\'%s\' capability'
452 452 )
453 453 % (purpose, name)
454 454 )
455 455
456 456
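
To make the ``name=value`` handling in ``peer.capable()`` above concrete, here is a self-contained sketch that reuses the same algorithm on a fake peer; the capability strings are illustrative only:

    class fakepeer:
        def capabilities(self):
            return {b'lookup', b'bundle2=HG20'}   # illustrative capabilities

        def capable(self, name):
            # same algorithm as peer.capable() above
            caps = self.capabilities()
            if name in caps:
                return True
            name = b'%s=' % name
            for cap in caps:
                if cap.startswith(name):
                    return cap[len(name):]
            return False


    p = fakepeer()
    assert p.capable(b'lookup') is True        # boolean capability
    assert p.capable(b'bundle2') == b'HG20'    # valued capability returns payload
    assert p.capable(b'largefiles') is False   # unsupported capability
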
457 457 class iverifyproblem(Protocol):
458 458 """Represents a problem with the integrity of the repository.
459 459
460 460 Instances of this interface are emitted to describe an integrity issue
461 461 with a repository (e.g. corrupt storage, missing data, etc).
462 462
463 463 Instances are essentially messages associated with severity.
464 464 """
465 465
466 466 warning: bytes | None
467 467 """Message indicating a non-fatal problem."""
468 468
469 469 error: bytes | None
470 470 """Message indicating a fatal problem."""
471 471
472 472 node: bytes | None
473 473 """Revision encountering the problem.
474 474
475 475 ``None`` means the problem doesn't apply to a single revision.
476 476 """
477 477
478 478
479 479 class irevisiondelta(Protocol):
480 480 """Represents a delta between one revision and another.
481 481
482 482 Instances convey enough information to allow a revision to be exchanged
483 483 with another repository.
484 484
485 485 Instances represent the fulltext revision data or a delta against
486 486 another revision. Therefore the ``revision`` and ``delta`` attributes
487 487 are mutually exclusive.
488 488
489 489 Typically used for changegroup generation.
490 490 """
491 491
492 492 node: bytes
493 493 """20 byte node of this revision."""
494 494
495 495 p1node: bytes
496 496 """20 byte node of 1st parent of this revision."""
497 497
498 498 p2node: bytes
499 499 """20 byte node of 2nd parent of this revision."""
500 500
501 501 # TODO: is this really optional? revlog.revlogrevisiondelta defaults to None
502 502 linknode: bytes | None
503 503 """20 byte node of the changelog revision this node is linked to."""
504 504
505 505 flags: int
506 506 """2 bytes of integer flags that apply to this revision.
507 507
508 508 This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
509 509 """
510 510
511 511 basenode: bytes
512 512 """20 byte node of the revision this data is a delta against.
513 513
514 514 ``nullid`` indicates that the revision is a full revision and not
515 515 a delta.
516 516 """
517 517
518 518 baserevisionsize: int | None
519 519 """Size of base revision this delta is against.
520 520
521 521 May be ``None`` if ``basenode`` is ``nullid``.
522 522 """
523 523
524 524 # TODO: is this really optional? (Seems possible in
525 525 # storageutil.emitrevisions()).
526 526 revision: bytes | None
527 527 """Raw fulltext of revision data for this node."""
528 528
529 529 delta: bytes | None
530 530 """Delta between ``basenode`` and ``node``.
531 531
532 532 Stored in the bdiff delta format.
533 533 """
534 534
535 535 sidedata: bytes | None
536 536 """Raw sidedata bytes for the given revision."""
537 537
538 538 protocol_flags: int
539 539 """Single byte of integer flags that can influence the protocol.
540 540
541 541 This is a bitwise composition of the ``storageutil.CG_FLAG*`` constants.
542 542 """
543 543
544 544
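
As a small illustration of the ``flags`` field described above, this sketch re-declares the ``REVISION_FLAG_*`` constants from the top of the file and rejects any bit outside ``REVISION_FLAGS_KNOWN``; the helper itself is hypothetical:

    REVISION_FLAG_CENSORED = 1 << 15
    REVISION_FLAG_ELLIPSIS = 1 << 14
    REVISION_FLAG_EXTSTORED = 1 << 13
    REVISION_FLAG_HASCOPIESINFO = 1 << 12
    REVISION_FLAGS_KNOWN = (
        REVISION_FLAG_CENSORED
        | REVISION_FLAG_ELLIPSIS
        | REVISION_FLAG_EXTSTORED
        | REVISION_FLAG_HASCOPIESINFO
    )


    def check_flags(flags: int) -> None:
        # hypothetical helper: reject flag bits this module does not know about
        unknown = flags & ~REVISION_FLAGS_KNOWN
        if unknown:
            raise ValueError('unknown revision flags: 0x%x' % unknown)


    check_flags(REVISION_FLAG_CENSORED | REVISION_FLAG_HASCOPIESINFO)  # accepted
    try:
        check_flags(1 << 3)  # not a known revision flag
    except ValueError as exc:
        print(exc)
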
545 545 class ifilerevisionssequence(Protocol):
546 546 """Contains index data for all revisions of a file.
547 547
548 548 Types implementing this behave like lists of tuples. The index
549 549 in the list corresponds to the revision number. The values contain
550 550 index metadata.
551 551
552 552 The *null* revision (revision number -1) is always the last item
553 553 in the index.
554 554 """
555 555
556 556 def __len__(self):
557 557 """The total number of revisions."""
558 558
559 559 def __getitem__(self, rev):
560 560 """Returns the object having a specific revision number.
561 561
562 562 Returns an 8-tuple with the following fields:
563 563
564 564 offset+flags
565 565 Contains the offset and flags for the revision. 64-bit unsigned
566 566 integer where the first 6 bytes are the offset and the next 2 bytes
567 567 are flags. The offset can be 0 if it is not used by the store.
568 568 compressed size
569 569 Size of the revision data in the store. It can be 0 if it isn't
570 570 needed by the store.
571 571 uncompressed size
572 572 Fulltext size. It can be 0 if it isn't needed by the store.
573 573 base revision
574 574 Revision number of revision the delta for storage is encoded
575 575 against. -1 indicates not encoded against a base revision.
576 576 link revision
577 577 Revision number of changelog revision this entry is related to.
578 578 p1 revision
579 579 Revision number of 1st parent. -1 if no 1st parent.
580 580 p2 revision
581 581 Revision number of 2nd parent. -1 if no 2nd parent.
582 582 node
583 583 Binary node value for this revision number.
584 584
585 585 Negative values should index off the end of the sequence. ``-1``
586 586 should return the null revision. ``-2`` should return the most
587 587 recent revision.
588 588 """
589 589
590 590 def __contains__(self, rev):
591 591 """Whether a revision number exists."""
592 592
593 593 def insert(self, i, entry):
594 594 """Add an item to the index at specific revision."""
595 595
596 596
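
The 8-tuple returned by ``__getitem__`` can be unpacked as in the sketch below; the entry values are fabricated, and the split of ``offset+flags`` assumes the layout stated in the docstring (offset in the high 6 bytes, flags in the low 2):

    entry = (
        (4096 << 16) | 0,  # offset+flags: offset 4096, no flags (fabricated)
        120,               # compressed size
        200,               # uncompressed size
        -1,                # base revision: stored as a fulltext, not a delta
        7,                 # link revision (changelog revision)
        3,                 # p1 revision
        -1,                # p2 revision: no second parent
        b'\x12' * 20,      # binary node
    )

    offset_flags, comp, uncomp, base, link, p1, p2, node = entry
    offset = offset_flags >> 16       # high 6 bytes
    flags = offset_flags & 0xFFFF     # low 2 bytes
    print(offset, flags, comp, uncomp, base, link, p1, p2, node.hex())
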
597 597 class ifileindex(Protocol):
598 598 """Storage interface for index data of a single file.
599 599
600 600 File storage data is divided into index metadata and data storage.
601 601 This interface defines the index portion of the interface.
602 602
603 603 The index logically consists of:
604 604
605 605 * A mapping between revision numbers and nodes.
606 606 * DAG data (storing and querying the relationship between nodes).
607 607 * Metadata to facilitate storage.
608 608 """
609 609
610 610 nullid: bytes
611 611 """node for the null revision for use as delta base."""
612 612
613 @abc.abstractmethod
613 614 def __len__(self) -> int:
614 615 """Obtain the number of revisions stored for this file."""
615 616
617 @abc.abstractmethod
616 618 def __iter__(self) -> Iterator[int]:
617 619 """Iterate over revision numbers for this file."""
618 620
621 @abc.abstractmethod
619 622 def hasnode(self, node):
620 623 """Returns a bool indicating if a node is known to this store.
621 624
622 625 Implementations must only return True for full, binary node values:
623 626 hex nodes, revision numbers, and partial node matches must be
624 627 rejected.
625 628
626 629 The null node is never present.
627 630 """
628 631
632 @abc.abstractmethod
629 633 def revs(self, start=0, stop=None):
630 634 """Iterate over revision numbers for this file, with control."""
631 635
636 @abc.abstractmethod
632 637 def parents(self, node):
633 638 """Returns a 2-tuple of parent nodes for a revision.
634 639
635 640 Values will be ``nullid`` if the parent is empty.
636 641 """
637 642
643 @abc.abstractmethod
638 644 def parentrevs(self, rev):
639 645 """Like parents() but operates on revision numbers."""
640 646
647 @abc.abstractmethod
641 648 def rev(self, node):
642 649 """Obtain the revision number given a node.
643 650
644 651 Raises ``error.LookupError`` if the node is not known.
645 652 """
646 653
654 @abc.abstractmethod
647 655 def node(self, rev):
648 656 """Obtain the node value given a revision number.
649 657
650 658 Raises ``IndexError`` if the node is not known.
651 659 """
652 660
661 @abc.abstractmethod
653 662 def lookup(self, node):
654 663 """Attempt to resolve a value to a node.
655 664
656 665 Value can be a binary node, hex node, revision number, or a string
657 666 that can be converted to an integer.
658 667
659 668 Raises ``error.LookupError`` if a node could not be resolved.
660 669 """
661 670
671 @abc.abstractmethod
662 672 def linkrev(self, rev):
663 673 """Obtain the changeset revision number a revision is linked to."""
664 674
675 @abc.abstractmethod
665 676 def iscensored(self, rev):
666 677 """Return whether a revision's content has been censored."""
667 678
679 @abc.abstractmethod
668 680 def commonancestorsheads(self, node1, node2):
669 681 """Obtain an iterable of nodes containing heads of common ancestors.
670 682
671 683 See ``ancestor.commonancestorsheads()``.
672 684 """
673 685
686 @abc.abstractmethod
674 687 def descendants(self, revs):
675 688 """Obtain descendant revision numbers for a set of revision numbers.
676 689
677 690 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
678 691 """
679 692
693 @abc.abstractmethod
680 694 def heads(self, start=None, stop=None):
681 695 """Obtain a list of nodes that are DAG heads, with control.
682 696
683 697 The set of revisions examined can be limited by specifying
684 698 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
685 699 iterable of nodes. DAG traversal starts at earlier revision
686 700 ``start`` and iterates forward until any node in ``stop`` is
687 701 encountered.
688 702 """
689 703
704 @abc.abstractmethod
690 705 def children(self, node):
691 706 """Obtain nodes that are children of a node.
692 707
693 708 Returns a list of nodes.
694 709 """
695 710
696 711
697 712 class ifiledata(Protocol):
698 713 """Storage interface for data storage of a specific file.
699 714
700 715 This complements ``ifileindex`` and provides an interface for accessing
701 716 data for a tracked file.
702 717 """
703 718
719 @abc.abstractmethod
704 720 def size(self, rev):
705 721 """Obtain the fulltext size of file data.
706 722
707 723 Any metadata is excluded from size measurements.
708 724 """
709 725
726 @abc.abstractmethod
710 727 def revision(self, node):
711 728 """Obtain fulltext data for a node.
712 729
713 730 By default, any storage transformations are applied before the data
714 731 is returned. If ``raw`` is True, non-raw storage transformations
715 732 are not applied.
716 733
717 734 The fulltext data may contain a header containing metadata. Most
718 735 consumers should use ``read()`` to obtain the actual file data.
719 736 """
720 737
738 @abc.abstractmethod
721 739 def rawdata(self, node):
722 740 """Obtain raw data for a node."""
723 741
742 @abc.abstractmethod
724 743 def read(self, node):
725 744 """Resolve file fulltext data.
726 745
727 746 This is similar to ``revision()`` except any metadata in the data
728 747 headers is stripped.
729 748 """
730 749
750 @abc.abstractmethod
731 751 def renamed(self, node):
732 752 """Obtain copy metadata for a node.
733 753
734 754 Returns ``False`` if no copy metadata is stored or a 2-tuple of
735 755 (path, node) from which this revision was copied.
736 756 """
737 757
758 @abc.abstractmethod
738 759 def cmp(self, node, fulltext):
739 760 """Compare fulltext to another revision.
740 761
741 762 Returns True if the fulltext is different from what is stored.
742 763
743 764 This takes copy metadata into account.
744 765
745 766 TODO better document the copy metadata and censoring logic.
746 767 """
747 768
769 @abc.abstractmethod
748 770 def emitrevisions(
749 771 self,
750 772 nodes,
751 773 nodesorder=None,
752 774 revisiondata=False,
753 775 assumehaveparentrevisions=False,
754 776 deltamode=CG_DELTAMODE_STD,
755 777 ):
756 778 """Produce ``irevisiondelta`` for revisions.
757 779
758 780 Given an iterable of nodes, emits objects conforming to the
759 781 ``irevisiondelta`` interface that describe revisions in storage.
760 782
761 783 This method is a generator.
762 784
763 785 The input nodes may be unordered. Implementations must ensure that a
764 786 node's parents are emitted before the node itself. Transitively, this
765 787 means that a node may only be emitted once all its ancestors in
766 788 ``nodes`` have also been emitted.
767 789
768 790 By default, emits "index" data (the ``node``, ``p1node``, and
769 791 ``p2node`` attributes). If ``revisiondata`` is set, revision data
770 792 will also be present on the emitted objects.
771 793
772 794 With default argument values, implementations can choose to emit
773 795 either fulltext revision data or a delta. When emitting deltas,
774 796 implementations must consider whether the delta's base revision
775 797 fulltext is available to the receiver.
776 798
777 799 The base revision fulltext is guaranteed to be available if any of
778 800 the following are met:
779 801
780 802 * Its fulltext revision was emitted by this method call.
781 803 * A delta for that revision was emitted by this method call.
782 804 * ``assumehaveparentrevisions`` is True and the base revision is a
783 805 parent of the node.
784 806
785 807 ``nodesorder`` can be used to control the order that revisions are
786 808 emitted. By default, revisions can be reordered as long as they are
787 809 in DAG topological order (see above). If the value is ``nodes``,
788 810 the iteration order from ``nodes`` should be used. If the value is
789 811 ``storage``, then the native order from the backing storage layer
790 812 is used. (Not all storage layers will have strong ordering and behavior
791 813 of this mode is storage-dependent.) ``nodes`` ordering can force
792 814 revisions to be emitted before their ancestors, so consumers should
793 815 use it with care.
794 816
795 817 The ``linknode`` attribute on the returned ``irevisiondelta`` may not
796 818 be set and it is the caller's responsibility to resolve it, if needed.
797 819
798 820 If ``deltamode`` is CG_DELTAMODE_PREV and revision data is requested,
799 821 all revision data should be emitted as deltas against the revision
800 822 emitted just prior. The initial revision should be a delta against its
801 823 1st parent.
802 824 """
803 825
804 826
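
A hedged sketch of consuming ``emitrevisions()`` under the ordering guarantees documented above; ``fstore`` is an assumed ``ifiledata`` implementation and ``apply_bdiff`` is a placeholder, not a real helper:

    def apply_bdiff(base, delta):
        # placeholder: a real consumer would apply the bdiff-format delta here
        raise NotImplementedError


    def collect_fulltexts(fstore, wanted, nullid=b'\x00' * 20):
        # ``wanted`` is an iterable of binary nodes; a node's parents that are
        # in ``wanted`` are guaranteed to be emitted before the node itself.
        out = {}
        for rev in fstore.emitrevisions(wanted, revisiondata=True):
            if rev.revision is not None:
                out[rev.node] = rev.revision          # a fulltext was emitted
            else:
                base = b'' if rev.basenode == nullid else out[rev.basenode]
                out[rev.node] = apply_bdiff(base, rev.delta)
        return out
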
805 827 class ifilemutation(Protocol):
806 828 """Storage interface for mutation events of a tracked file."""
807 829
830 @abc.abstractmethod
808 831 def add(self, filedata, meta, transaction, linkrev, p1, p2):
809 832 """Add a new revision to the store.
810 833
811 834 Takes file data, dictionary of metadata, a transaction, linkrev,
812 835 and parent nodes.
813 836
814 837 Returns the node that was added.
815 838
816 839 May no-op if a revision matching the supplied data is already stored.
817 840 """
818 841
842 @abc.abstractmethod
819 843 def addrevision(
820 844 self,
821 845 revisiondata,
822 846 transaction,
823 847 linkrev,
824 848 p1,
825 849 p2,
826 850 node=None,
827 851 flags=0,
828 852 cachedelta=None,
829 853 ):
830 854 """Add a new revision to the store and return its number.
831 855
832 856 This is similar to ``add()`` except it operates at a lower level.
833 857
834 858 The data passed in already contains a metadata header, if any.
835 859
836 860 ``node`` and ``flags`` can be used to define the expected node and
837 861 the flags to use with storage. ``flags`` is a bitwise value composed
838 862 of the various ``REVISION_FLAG_*`` constants.
839 863
840 864 ``add()`` is usually called when adding files from e.g. the working
841 865 directory. ``addrevision()`` is often called by ``add()`` and for
842 866 scenarios where revision data has already been computed, such as when
843 867 applying raw data from a peer repo.
844 868 """
845 869
870 @abc.abstractmethod
846 871 def addgroup(
847 872 self,
848 873 deltas,
849 874 linkmapper,
850 875 transaction,
851 876 addrevisioncb=None,
852 877 duplicaterevisioncb=None,
853 878 maybemissingparents=False,
854 879 ):
855 880 """Process a series of deltas for storage.
856 881
857 882 ``deltas`` is an iterable of 7-tuples of
858 883 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
859 884 to add.
860 885
861 886 The ``delta`` field contains ``mpatch`` data to apply to a base
862 887 revision, identified by ``deltabase``. The base node can be
863 888 ``nullid``, in which case the header from the delta can be ignored
864 889 and the delta used as the fulltext.
865 890
866 891 ``alwayscache`` instructs the lower layers to cache the content of the
867 892 newly added revision, even if it needs to be explicitly computed.
868 893 This used to be the default when ``addrevisioncb`` was provided up to
869 894 Mercurial 5.8.
870 895
871 896 ``addrevisioncb`` should be called for each new rev as it is committed.
872 897 ``duplicaterevisioncb`` should be called for all revs with a
873 898 pre-existing node.
874 899
875 900 ``maybemissingparents`` is a bool indicating whether the incoming
876 901 data may reference parents/ancestor revisions that aren't present.
877 902 This flag is set when receiving data into a "shallow" store that
878 903 doesn't hold all history.
879 904
880 905 Returns a list of nodes that were processed. A node will be in the list
881 906 even if it existed in the store previously.
882 907 """
883 908
909 @abc.abstractmethod
884 910 def censorrevision(self, tr, node, tombstone=b''):
885 911 """Remove the content of a single revision.
886 912
887 913 The specified ``node`` will have its content purged from storage.
888 914 Future attempts to access the revision data for this node will
889 915 result in failure.
890 916
891 917 A ``tombstone`` message can optionally be stored. This message may be
892 918 displayed to users when they attempt to access the missing revision
893 919 data.
894 920
895 921 Storage backends may have stored deltas against the previous content
896 922 in this revision. As part of censoring a revision, these storage
897 923 backends are expected to rewrite any internally stored deltas such
898 924 that they no longer reference the deleted content.
899 925 """
900 926
927 @abc.abstractmethod
901 928 def getstrippoint(self, minlink):
902 929 """Find the minimum revision that must be stripped to strip a linkrev.
903 930
904 931 Returns a 2-tuple containing the minimum revision number and a set
905 932 of all revisions numbers that would be broken by this strip.
906 933
907 934 TODO this is highly revlog centric and should be abstracted into
908 935 a higher-level deletion API. ``repair.strip()`` relies on this.
909 936 """
910 937
938 @abc.abstractmethod
911 939 def strip(self, minlink, transaction):
912 940 """Remove storage of items starting at a linkrev.
913 941
914 942 This uses ``getstrippoint()`` to determine the first node to remove.
915 943 Then it effectively truncates storage for all revisions after that.
916 944
917 945 TODO this is highly revlog centric and should be abstracted into a
918 946 higher-level deletion API.
919 947 """
920 948
921 949
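
For reference, the following sketch spells out the shape of one entry of the ``deltas`` iterable that ``addgroup()`` consumes; every value is a fabricated placeholder:

    node = b'\x11' * 20
    p1 = b'\x22' * 20
    p2 = b'\x00' * 20          # placeholder for "no second parent"
    linknode = b'\x33' * 20
    deltabase = b'\x00' * 20   # nullid-style base: per the docstring, the
    delta = b'new file contents\n'  # delta can then be used as the fulltext
    flags = 0

    delta_entry = (node, p1, p2, linknode, deltabase, delta, flags)
    # store.addgroup([delta_entry], linkmapper, transaction)  # see contract above
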
922 class ifilestorage(ifileindex, ifiledata, ifilemutation):
950 class ifilestorage(ifileindex, ifiledata, ifilemutation, Protocol):
923 951 """Complete storage interface for a single tracked file."""
924 952
953 @abc.abstractmethod
925 954 def files(self):
926 955 """Obtain paths that are backing storage for this file.
927 956
928 957 TODO this is used heavily by verify code and there should probably
929 958 be a better API for that.
930 959 """
931 960
961 @abc.abstractmethod
932 962 def storageinfo(
933 963 self,
934 964 exclusivefiles=False,
935 965 sharedfiles=False,
936 966 revisionscount=False,
937 967 trackedsize=False,
938 968 storedsize=False,
939 969 ):
940 970 """Obtain information about storage for this file's data.
941 971
942 972 Returns a dict describing storage for this tracked path. The keys
943 973 in the dict map to arguments of the same name. The arguments are bools
944 974 indicating whether to calculate and obtain that data.
945 975
946 976 exclusivefiles
947 977 Iterable of (vfs, path) describing files that are exclusively
948 978 used to back storage for this tracked path.
949 979
950 980 sharedfiles
951 981 Iterable of (vfs, path) describing files that are used to back
952 982 storage for this tracked path. Those files may also provide storage
953 983 for other stored entities.
954 984
955 985 revisionscount
956 986 Number of revisions available for retrieval.
957 987
958 988 trackedsize
959 989 Total size in bytes of all tracked revisions. This is a sum of the
960 990 length of the fulltext of all revisions.
961 991
962 992 storedsize
963 993 Total size in bytes used to store data for all tracked revisions.
964 994 This is commonly less than ``trackedsize`` due to internal usage
965 995 of deltas rather than fulltext revisions.
966 996
967 997 Not all storage backends may support all queries or have a reasonable
968 998 value to use. In that case, the value should be set to ``None`` and
969 999 callers are expected to handle this special value.
970 1000 """
971 1001
1002 @abc.abstractmethod
972 1003 def verifyintegrity(self, state) -> Iterable[iverifyproblem]:
973 1004 """Verifies the integrity of file storage.
974 1005
975 1006 ``state`` is a dict holding state of the verifier process. It can be
976 1007 used to communicate data between invocations of multiple storage
977 1008 primitives.
978 1009
979 1010 If individual revisions cannot have their revision content resolved,
980 1011 the method is expected to set the ``skipread`` key to a set of nodes
981 1012 that encountered problems. If set, the method can also add the node(s)
982 1013 to ``safe_renamed`` in order to indicate nodes that may perform the
983 1014 rename checks with currently accessible data.
984 1015
985 1016 The method yields objects conforming to the ``iverifyproblem``
986 1017 interface.
987 1018 """
988 1019
989 1020
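
A hedged sketch of consuming ``verifyintegrity()`` through the ``iverifyproblem`` contract; ``fstore`` is an assumed ``ifilestorage`` implementation and the ``state`` keys are shown as plain strings for brevity:

    def collect_problems(fstore):
        state = {'skipread': set(), 'safe_renamed': set()}
        warnings, errors = [], []
        for problem in fstore.verifyintegrity(state):
            if problem.warning is not None:
                warnings.append((problem.node, problem.warning))
            if problem.error is not None:
                errors.append((problem.node, problem.error))
        return warnings, errors, state['skipread']
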
990 1021 class idirs(Protocol):
991 1022 """Interface representing a collection of directories from paths.
992 1023
993 1024 This interface is essentially a derived data structure representing
994 1025 directories from a collection of paths.
995 1026 """
996 1027
997 1028 def addpath(self, path):
998 1029 """Add a path to the collection.
999 1030
1000 1031 All directories in the path will be added to the collection.
1001 1032 """
1002 1033
1003 1034 def delpath(self, path):
1004 1035 """Remove a path from the collection.
1005 1036
1006 1037 If the removal was the last path in a particular directory, the
1007 1038 directory is removed from the collection.
1008 1039 """
1009 1040
1010 1041 def __iter__(self):
1011 1042 """Iterate over the directories in this collection of paths."""
1012 1043
1013 1044 def __contains__(self, path):
1014 1045 """Whether a specific directory is in this collection."""
1015 1046
1016 1047
1017 1048 class imanifestdict(Protocol):
1018 1049 """Interface representing a manifest data structure.
1019 1050
1020 1051 A manifest is effectively a dict mapping paths to entries. Each entry
1021 1052 consists of a binary node and extra flags affecting that entry.
1022 1053 """
1023 1054
1024 1055 def __getitem__(self, path):
1025 1056 """Returns the binary node value for a path in the manifest.
1026 1057
1027 1058 Raises ``KeyError`` if the path does not exist in the manifest.
1028 1059
1029 1060 Equivalent to ``self.find(path)[0]``.
1030 1061 """
1031 1062
1032 1063 def find(self, path):
1033 1064 """Returns the entry for a path in the manifest.
1034 1065
1035 1066 Returns a 2-tuple of (node, flags).
1036 1067
1037 1068 Raises ``KeyError`` if the path does not exist in the manifest.
1038 1069 """
1039 1070
1040 1071 def __len__(self):
1041 1072 """Return the number of entries in the manifest."""
1042 1073
1043 1074 def __nonzero__(self):
1044 1075 """Returns True if the manifest has entries, False otherwise."""
1045 1076
1046 1077 __bool__ = __nonzero__
1047 1078
1048 1079 def set(self, path, node, flags):
1049 1080 """Define the node value and flags for a path in the manifest.
1050 1081
1051 1082 Equivalent to __setitem__ followed by setflag, but can be more efficient.
1052 1083 """
1053 1084
1054 1085 def __setitem__(self, path, node):
1055 1086 """Define the node value for a path in the manifest.
1056 1087
1057 1088 If the path is already in the manifest, its flags will be copied to
1058 1089 the new entry.
1059 1090 """
1060 1091
1061 1092 def __contains__(self, path):
1062 1093 """Whether a path exists in the manifest."""
1063 1094
1064 1095 def __delitem__(self, path):
1065 1096 """Remove a path from the manifest.
1066 1097
1067 1098 Raises ``KeyError`` if the path is not in the manifest.
1068 1099 """
1069 1100
1070 1101 def __iter__(self):
1071 1102 """Iterate over paths in the manifest."""
1072 1103
1073 1104 def iterkeys(self):
1074 1105 """Iterate over paths in the manifest."""
1075 1106
1076 1107 def keys(self):
1077 1108 """Obtain a list of paths in the manifest."""
1078 1109
1079 1110 def filesnotin(self, other, match=None):
1080 1111 """Obtain the set of paths in this manifest but not in another.
1081 1112
1082 1113 ``match`` is an optional matcher function to be applied to both
1083 1114 manifests.
1084 1115
1085 1116 Returns a set of paths.
1086 1117 """
1087 1118
1088 1119 def dirs(self):
1089 1120 """Returns an object implementing the ``idirs`` interface."""
1090 1121
1091 1122 def hasdir(self, dir):
1092 1123 """Returns a bool indicating if a directory is in this manifest."""
1093 1124
1094 1125 def walk(self, match):
1095 1126 """Generator of paths in manifest satisfying a matcher.
1096 1127
1097 1128 If the matcher has explicit files listed and they don't exist in
1098 1129 the manifest, ``match.bad()`` is called for each missing file.
1099 1130 """
1100 1131
1101 1132 def diff(self, other, match=None, clean=False):
1102 1133 """Find differences between this manifest and another.
1103 1134
1104 1135 This manifest is compared to ``other``.
1105 1136
1106 1137 If ``match`` is provided, the two manifests are filtered against this
1107 1138 matcher and only entries satisfying the matcher are compared.
1108 1139
1109 1140 If ``clean`` is True, unchanged files are included in the returned
1110 1141 object.
1111 1142
1112 1143 Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
1113 1144 the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
1114 1145 represents the node and flags for this manifest and ``(node2, flag2)``
1115 1146 are the same for the other manifest.
1116 1147 """
1117 1148
1118 1149 def setflag(self, path, flag):
1119 1150 """Set the flag value for a given path.
1120 1151
1121 1152 Raises ``KeyError`` if the path is not already in the manifest.
1122 1153 """
1123 1154
1124 1155 def get(self, path, default=None):
1125 1156 """Obtain the node value for a path or a default value if missing."""
1126 1157
1127 1158 def flags(self, path):
1128 1159 """Return the flags value for a path (default: empty bytestring)."""
1129 1160
1130 1161 def copy(self):
1131 1162 """Return a copy of this manifest."""
1132 1163
1133 1164 def items(self):
1134 1165 """Returns an iterable of (path, node) for items in this manifest."""
1135 1166
1136 1167 def iteritems(self):
1137 1168 """Identical to items()."""
1138 1169
1139 1170 def iterentries(self):
1140 1171 """Returns an iterable of (path, node, flags) for this manifest.
1141 1172
1142 1173 Similar to ``iteritems()`` except items are a 3-tuple and include
1143 1174 flags.
1144 1175 """
1145 1176
1146 1177 def text(self):
1147 1178 """Obtain the raw data representation for this manifest.
1148 1179
1149 1180 Result is used to create a manifest revision.
1150 1181 """
1151 1182
1152 1183 def fastdelta(self, base, changes):
1153 1184 """Obtain a delta between this manifest and another given changes.
1154 1185
1155 1186 ``base`` is the raw data representation for another manifest.
1156 1187
1157 1188 ``changes`` is an iterable of ``(path, to_delete)``.
1158 1189
1159 1190 Returns a 2-tuple containing ``bytearray(self.text())`` and the
1160 1191 delta between ``base`` and this manifest.
1161 1192
1162 1193 If this manifest implementation can't support ``fastdelta()``,
1163 1194 raise ``mercurial.manifest.FastdeltaUnavailable``.
1164 1195 """
1165 1196
1166 1197
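
The nested tuples returned by ``diff()`` are easiest to read in code. The sketch below assumes that a path missing from one side is reported with a ``None`` node on that side; ``m1`` and ``m2`` are assumed ``imanifestdict`` instances:

    def summarize_diff(m1, m2):
        added, removed, modified = [], [], []
        for path, ((node1, flag1), (node2, flag2)) in m1.diff(m2).items():
            if node1 is None:
                added.append(path)       # only present in the other manifest
            elif node2 is None:
                removed.append(path)     # only present in this manifest
            else:
                modified.append(path)    # node or flags differ
        return added, removed, modified
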
1167 1198 class imanifestrevisionbase(Protocol):
1168 1199 """Base interface representing a single revision of a manifest.
1169 1200
1170 1201 Should not be used as a primary interface: should always be inherited
1171 1202 as part of a larger interface.
1172 1203 """
1173 1204
1174 1205 def copy(self):
1175 1206 """Obtain a copy of this manifest instance.
1176 1207
1177 1208 Returns an object conforming to the ``imanifestrevisionwritable``
1178 1209 interface. The instance will be associated with the same
1179 1210 ``imanifestlog`` collection as this instance.
1180 1211 """
1181 1212
1182 1213 def read(self):
1183 1214 """Obtain the parsed manifest data structure.
1184 1215
1185 1216 The returned object conforms to the ``imanifestdict`` interface.
1186 1217 """
1187 1218
1188 1219
1189 1220 class imanifestrevisionstored(imanifestrevisionbase, Protocol):
1190 1221 """Interface representing a manifest revision committed to storage."""
1191 1222
1192 1223 @abc.abstractmethod
1193 1224 def node(self) -> bytes:
1194 1225 """The binary node for this manifest."""
1195 1226
1196 1227 parents: list[bytes]
1197 1228 """List of binary nodes that are parents for this manifest revision."""
1198 1229
1199 1230 @abc.abstractmethod
1200 1231 def readdelta(self, shallow: bool = False):
1201 1232 """Obtain the manifest data structure representing changes from parent.
1202 1233
1203 1234 This manifest is compared to its 1st parent. A new manifest
1204 1235 representing those differences is constructed.
1205 1236
1206 1237 If `shallow` is True, this will read the delta for this directory,
1207 1238 without recursively reading subdirectory manifests. Instead, any
1208 1239 subdirectory entry will be reported as it appears in the manifest, i.e.
1209 1240 the subdirectory will be reported among files and distinguished only by
1210 1241 its 't' flag. This only applies if the underlying manifest supports it.
1211 1242
1212 1243 The returned object conforms to the ``imanifestdict`` interface.
1213 1244 """
1214 1245
1215 1246 @abc.abstractmethod
1216 1247 def read_any_fast_delta(
1217 1248 self,
1218 1249 valid_bases: Collection[int] | None = None,
1219 1250 *,
1220 1251 shallow: bool = False,
1221 1252 ):
1222 1253 """read some manifest information as fast as possible
1223 1254
1224 1255 This might return a "delta", a manifest object containing only the files
1225 1256 changed compared to another revision. The `valid_bases` argument
1226 1257 controls the set of revisions that might be used as a base.
1227 1258
1228 1259 If no delta can be retrieved quickly, a full read of the manifest will
1229 1260 be performed instead.
1230 1261
1231 1262 The function returns a tuple with two elements. The first one is the
1232 1263 delta base used (or None if we did a full read), the second one is the
1233 1264 manifest information.
1234 1265
1235 1266 If `shallow` is True, this will read the delta for this directory,
1236 1267 without recursively reading subdirectory manifests. Instead, any
1237 1268 subdirectory entry will be reported as it appears in the manifest, i.e.
1238 1269 the subdirectory will be reported among files and distinguished only by
1239 1270 its 't' flag. This only applies if the underlying manifest supports it.
1240 1271
1241 1272 The returned object conforms to the ``imanifestdict`` interface.
1242 1273 """
1243 1274
1244 1275 @abc.abstractmethod
1245 1276 def read_delta_parents(self, *, shallow: bool = False, exact: bool = True):
1246 1277 """return a diff from this revision against both parents.
1247 1278
1248 1279 If `exact` is False, this might return a superset of the diff, containing
1249 1280 files that are actually present as is in one of the parents.
1250 1281
1251 1282 If `shallow` is True, this will read the delta for this directory,
1252 1283 without recursively reading subdirectory manifests. Instead, any
1253 1284 subdirectory entry will be reported as it appears in the manifest, i.e.
1254 1285 the subdirectory will be reported among files and distinguished only by
1255 1286 its 't' flag. This only applies if the underlying manifest supports it.
1256 1287
1257 1288 The returned object conforms to the ``imanifestdict`` interface."""
1258 1289
1259 1290 @abc.abstractmethod
1260 1291 def read_delta_new_entries(self, *, shallow: bool = False):
1261 1292 """Return a manifest containing just the entries that might be new to
1262 1293 the repository.
1263 1294
1264 1295 This is often equivalent to a diff against both parents, but without
1265 1296 guarantee. For performance reasons, it might contain more files in some cases.
1266 1297
1267 1298 If `shallow` is True, this will read the delta for this directory,
1268 1299 without recursively reading subdirectory manifests. Instead, any
1269 1300 subdirectory entry will be reported as it appears in the manifest, i.e.
1270 1301 the subdirectory will be reported among files and distinguished only by
1271 1302 its 't' flag. This only applies if the underlying manifest supports it.
1272 1303
1273 1304 The returned object conforms to the ``imanifestdict`` interface."""
1274 1305
1275 1306 @abc.abstractmethod
1276 1307 def readfast(self, shallow: bool = False):
1277 1308 """Calls either ``read()`` or ``readdelta()``.
1278 1309
1279 1310 The faster of the two options is called.
1280 1311 """
1281 1312
1282 1313 @abc.abstractmethod
1283 1314 def find(self, key: bytes) -> tuple[bytes, bytes]:
1284 1315 """Calls ``self.read().find(key)``.
1285 1316
1286 1317 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
1287 1318 """
1288 1319
1289 1320
1290 1321 class imanifestrevisionwritable(imanifestrevisionbase, Protocol):
1291 1322 """Interface representing a manifest revision that can be committed."""
1292 1323
1293 1324 @abc.abstractmethod
1294 1325 def write(
1295 1326 self, transaction, linkrev, p1node, p2node, added, removed, match=None
1296 1327 ):
1297 1328 """Add this revision to storage.
1298 1329
1299 1330 Takes a transaction object, the changeset revision number it will
1300 1331 be associated with, its parent nodes, and lists of added and
1301 1332 removed paths.
1302 1333
1303 1334 If match is provided, storage can choose not to inspect or write out
1304 1335 items that do not match. Storage is still required to be able to provide
1305 1336 the full manifest in the future for any directories written (these
1306 1337 manifests should not be "narrowed on disk").
1307 1338
1308 1339 Returns the binary node of the created revision.
1309 1340 """
1310 1341
1311 1342
1312 1343 class imanifeststorage(Protocol):
1313 1344 """Storage interface for manifest data."""
1314 1345
1315 1346 nodeconstants: NodeConstants
1316 1347 """nodeconstants used by the current repository."""
1317 1348
1318 1349 tree: bytes
1319 1350 """The path to the directory this manifest tracks.
1320 1351
1321 1352 The empty bytestring represents the root manifest.
1322 1353 """
1323 1354
1324 1355 index: ifilerevisionssequence
1325 1356 """An ``ifilerevisionssequence`` instance."""
1326 1357
1327 1358 opener: Vfs
1328 1359 """VFS opener to use to access underlying files used for storage.
1329 1360
1330 1361 TODO this is revlog specific and should not be exposed.
1331 1362 """
1332 1363
1333 1364 # TODO: finish type hints
1334 1365 fulltextcache: dict
1335 1366 """Dict with cache of fulltexts.
1336 1367
1337 1368 TODO this doesn't feel appropriate for the storage interface.
1338 1369 """
1339 1370
1340 1371 @abc.abstractmethod
1341 1372 def __len__(self):
1342 1373 """Obtain the number of revisions stored for this manifest."""
1343 1374
1344 1375 @abc.abstractmethod
1345 1376 def __iter__(self):
1346 1377 """Iterate over revision numbers for this manifest."""
1347 1378
1348 1379 @abc.abstractmethod
1349 1380 def rev(self, node):
1350 1381 """Obtain the revision number given a binary node.
1351 1382
1352 1383 Raises ``error.LookupError`` if the node is not known.
1353 1384 """
1354 1385
1355 1386 @abc.abstractmethod
1356 1387 def node(self, rev):
1357 1388 """Obtain the node value given a revision number.
1358 1389
1359 1390 Raises ``error.LookupError`` if the revision is not known.
1360 1391 """
1361 1392
1362 1393 @abc.abstractmethod
1363 1394 def lookup(self, value):
1364 1395 """Attempt to resolve a value to a node.
1365 1396
1366 1397 Value can be a binary node, hex node, revision number, or a bytes
1367 1398 that can be converted to an integer.
1368 1399
1369 1400 Raises ``error.LookupError`` if a node could not be resolved.
1370 1401 """
1371 1402
1372 1403 @abc.abstractmethod
1373 1404 def parents(self, node):
1374 1405 """Returns a 2-tuple of parent nodes for a node.
1375 1406
1376 1407 Values will be ``nullid`` if the parent is empty.
1377 1408 """
1378 1409
1379 1410 @abc.abstractmethod
1380 1411 def parentrevs(self, rev):
1381 1412 """Like parents() but operates on revision numbers."""
1382 1413
1383 1414 @abc.abstractmethod
1384 1415 def linkrev(self, rev):
1385 1416 """Obtain the changeset revision number a revision is linked to."""
1386 1417
1387 1418 @abc.abstractmethod
1388 1419 def revision(self, node):
1389 1420 """Obtain fulltext data for a node."""
1390 1421
1391 1422 @abc.abstractmethod
1392 1423 def rawdata(self, node):
1393 1424 """Obtain raw data for a node."""
1394 1425
1395 1426 @abc.abstractmethod
1396 1427 def revdiff(self, rev1, rev2):
1397 1428 """Obtain a delta between two revision numbers.
1398 1429
1399 1430 The returned data is the result of ``bdiff.bdiff()`` on the raw
1400 1431 revision data.
1401 1432 """
1402 1433
1403 1434 @abc.abstractmethod
1404 1435 def cmp(self, node, fulltext):
1405 1436 """Compare fulltext to another revision.
1406 1437
1407 1438 Returns True if the fulltext is different from what is stored.
1408 1439 """
1409 1440
1410 1441 @abc.abstractmethod
1411 1442 def emitrevisions(
1412 1443 self,
1413 1444 nodes,
1414 1445 nodesorder=None,
1415 1446 revisiondata=False,
1416 1447 assumehaveparentrevisions=False,
1417 1448 ):
1418 1449 """Produce ``irevisiondelta`` describing revisions.
1419 1450
1420 1451 See the documentation for ``ifiledata`` for more.
1421 1452 """
1422 1453
1423 1454 @abc.abstractmethod
1424 1455 def addgroup(
1425 1456 self,
1426 1457 deltas,
1427 1458 linkmapper,
1428 1459 transaction,
1429 1460 addrevisioncb=None,
1430 1461 duplicaterevisioncb=None,
1431 1462 ):
1432 1463 """Process a series of deltas for storage.
1433 1464
1434 1465 See the documentation in ``ifilemutation`` for more.
1435 1466 """
1436 1467
1437 1468 @abc.abstractmethod
1438 1469 def rawsize(self, rev):
1439 1470 """Obtain the size of tracked data.
1440 1471
1441 1472 Is equivalent to ``len(m.rawdata(node))``.
1442 1473
1443 1474 TODO this method is only used by upgrade code and may be removed.
1444 1475 """
1445 1476
1446 1477 @abc.abstractmethod
1447 1478 def getstrippoint(self, minlink):
1448 1479 """Find minimum revision that must be stripped to strip a linkrev.
1449 1480
1450 1481 See the documentation in ``ifilemutation`` for more.
1451 1482 """
1452 1483
1453 1484 @abc.abstractmethod
1454 1485 def strip(self, minlink, transaction):
1455 1486 """Remove storage of items starting at a linkrev.
1456 1487
1457 1488 See the documentation in ``ifilemutation`` for more.
1458 1489 """
1459 1490
1460 1491 @abc.abstractmethod
1461 1492 def checksize(self):
1462 1493 """Obtain the expected sizes of backing files.
1463 1494
1464 1495 TODO this is used by verify and it should not be part of the interface.
1465 1496 """
1466 1497
1467 1498 @abc.abstractmethod
1468 1499 def files(self):
1469 1500 """Obtain paths that are backing storage for this manifest.
1470 1501
1471 1502 TODO this is used by verify and there should probably be a better API
1472 1503 for this functionality.
1473 1504 """
1474 1505
1475 1506 @abc.abstractmethod
1476 1507 def deltaparent(self, rev):
1477 1508 """Obtain the revision that a revision is delta'd against.
1478 1509
1479 1510 TODO delta encoding is an implementation detail of storage and should
1480 1511 not be exposed to the storage interface.
1481 1512 """
1482 1513
1483 1514 @abc.abstractmethod
1484 1515 def clone(self, tr, dest, **kwargs):
1485 1516 """Clone this instance to another."""
1486 1517
1487 1518 @abc.abstractmethod
1488 1519 def clearcaches(self, clear_persisted_data=False):
1489 1520 """Clear any caches associated with this instance."""
1490 1521
1491 1522 @abc.abstractmethod
1492 1523 def dirlog(self, d):
1493 1524 """Obtain a manifest storage instance for a tree."""
1494 1525
1495 1526 @abc.abstractmethod
1496 1527 def add(
1497 1528 self,
1498 1529 m,
1499 1530 transaction,
1500 1531 link,
1501 1532 p1,
1502 1533 p2,
1503 1534 added,
1504 1535 removed,
1505 1536 readtree=None,
1506 1537 match=None,
1507 1538 ):
1508 1539 """Add a revision to storage.
1509 1540
1510 1541 ``m`` is an object conforming to ``imanifestdict``.
1511 1542
1512 1543 ``link`` is the linkrev revision number.
1513 1544
1514 1545 ``p1`` and ``p2`` are the parent revision numbers.
1515 1546
1516 1547 ``added`` and ``removed`` are iterables of added and removed paths,
1517 1548 respectively.
1518 1549
1519 1550 ``readtree`` is a function that can be used to read the child tree(s)
1520 1551 when recursively writing the full tree structure when using
1521 1552 tree manifests.
1522 1553
1523 1554 ``match`` is a matcher that can be used to hint to storage that not all
1524 1555 paths must be inspected; this is an optimization and can be safely
1525 1556 ignored. Note that the storage must still be able to reproduce a full
1526 1557 manifest including files that did not match.
1527 1558 """
1528 1559
1529 1560 @abc.abstractmethod
1530 1561 def storageinfo(
1531 1562 self,
1532 1563 exclusivefiles=False,
1533 1564 sharedfiles=False,
1534 1565 revisionscount=False,
1535 1566 trackedsize=False,
1536 1567 storedsize=False,
1537 1568 ):
1538 1569 """Obtain information about storage for this manifest's data.
1539 1570
1540 1571 See ``ifilestorage.storageinfo()`` for a description of this method.
1541 1572 This one behaves the same way, except for manifest data.
1542 1573 """
1543 1574
1544 1575 @abc.abstractmethod
1545 1576 def get_revlog(self):
1546 1577 """Return the underlying revlog instance, if any.
1547 1578
1548 1579 This exists because a lot of code leverages the fact that the underlying
1549 1580 storage is a revlog for optimization, so providing a simple way to access
1550 1581 the revlog instance helps such code.
1551 1582 """
1552 1583
1553 1584
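
# Illustrative sketch (not part of the interface above): recording a new
# manifest revision via ``add()``.  ``store`` is assumed to implement the
# manifest storage interface defined above, and treating the return value as
# the new binary node is an assumption of this sketch.
def _sketch_add_manifest(store, m, tr, linkrev, p1, p2, added, removed):
    """Store ``m`` (an ``imanifestdict``) as a new revision (example only)."""
    # ``added``/``removed`` are iterables of changed paths; ``linkrev`` ties
    # the manifest revision to a changelog revision number.
    return store.add(m, tr, linkrev, p1, p2, added, removed)
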
1554 1585 class imanifestlog(Protocol):
1555 1586 """Interface representing a collection of manifest snapshots.
1556 1587
1557 1588 Represents the root manifest in a repository.
1558 1589
1559 1590 Also serves as a means to access nested tree manifests and to cache
1560 1591 tree manifests.
1561 1592 """
1562 1593
1563 1594 nodeconstants: NodeConstants
1564 1595 """nodeconstants used by the current repository."""
1565 1596
1566 1597 narrowed: bool
1567 1598 """True if the manifest is narrowed by a matcher."""
1568 1599
1569 1600 @abc.abstractmethod
1570 1601 def __getitem__(self, node):
1571 1602 """Obtain a manifest instance for a given binary node.
1572 1603
1573 1604 Equivalent to calling ``self.get('', node)``.
1574 1605
1575 1606 The returned object conforms to the ``imanifestrevisionstored``
1576 1607 interface.
1577 1608 """
1578 1609
1579 1610 @abc.abstractmethod
1580 1611 def get(self, tree, node, verify=True):
1581 1612 """Retrieve the manifest instance for a given directory and binary node.
1582 1613
1583 1614 ``node`` always refers to the node of the root manifest (which will be
1584 1615 the only manifest if flat manifests are being used).
1585 1616
1586 1617 If ``tree`` is the empty string, the root manifest is returned.
1587 1618 Otherwise the manifest for the specified directory will be returned
1588 1619 (requires tree manifests).
1589 1620
1590 1621 If ``verify`` is True, ``LookupError`` is raised if the node is not
1591 1622 known.
1592 1623
1593 1624 The returned object conforms to the ``imanifestrevisionstored``
1594 1625 interface.
1595 1626 """
1596 1627
1597 1628 @abc.abstractmethod
1598 1629 def getstorage(self, tree):
1599 1630 """Retrieve an interface to storage for a particular tree.
1600 1631
1601 1632 If ``tree`` is the empty bytestring, storage for the root manifest will
1602 1633 be returned. Otherwise storage for a tree manifest is returned.
1603 1634
1604 1635 TODO formalize interface for returned object.
1605 1636 """
1606 1637
1607 1638 @abc.abstractmethod
1608 1639 def clearcaches(self, clear_persisted_data: bool = False) -> None:
1609 1640 """Clear caches associated with this collection."""
1610 1641
1611 1642 @abc.abstractmethod
1612 1643 def rev(self, node):
1613 1644 """Obtain the revision number for a binary node.
1614 1645
1615 1646 Raises ``error.LookupError`` if the node is not known.
1616 1647 """
1617 1648
1618 1649 @abc.abstractmethod
1619 1650 def update_caches(self, transaction):
1620 1651 """Update whatever caches are relevant for the storage in use."""
1621 1652
1622 1653
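# Illustrative sketch (not part of ``imanifestlog``): resolving manifests
# through the collection.  ``mfl`` is assumed to implement ``imanifestlog``,
# ``node`` to be the binary node of a root manifest, and the subtree path is
# a hypothetical example that requires tree manifests.
def _sketch_manifest_lookup(mfl, node):
    """Return the root manifest, a subtree manifest, and the tree's storage."""
    root = mfl[node]                          # same as mfl.get(b'', node)
    subtree = mfl.get(b'dir/subdir', node)    # tree manifests only
    storage = mfl.getstorage(b'dir/subdir')   # storage interface for the tree
    return root, subtree, storage
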
1623 1654 class ilocalrepositoryfilestorage(Protocol):
1624 1655 """Local repository sub-interface providing access to tracked file storage.
1625 1656
1626 1657 This interface defines how a repository accesses storage for a single
1627 1658 tracked file path.
1628 1659 """
1629 1660
1630 1661 def file(self, f):
1631 1662 """Obtain a filelog for a tracked path.
1632 1663
1633 1664 The returned type conforms to the ``ifilestorage`` interface.
1634 1665 """
1635 1666
1636 1667
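# Illustrative sketch (not part of ``ilocalrepositoryfilestorage``): obtaining
# per-file storage from a repository.  ``repo`` is assumed to implement that
# interface; the path and the queried flags are examples only, and the shape
# of ``storageinfo()``'s return value is left to the implementation.
def _sketch_tracked_file_storage(repo):
    """Obtain ``ifilestorage`` for a tracked path and query its metadata."""
    fl = repo.file(b'path/to/file.txt')  # hypothetical tracked path
    info = fl.storageinfo(revisionscount=True, trackedsize=True)
    return fl, info
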
1637 1668 class ilocalrepositorymain(Protocol):
1638 1669 """Main interface for local repositories.
1639 1670
1640 1671 This currently captures the reality of things - not how things should be.
1641 1672 """
1642 1673
1643 1674 nodeconstants: NodeConstants
1644 1675 """Constant nodes matching the hash function used by the repository."""
1645 1676
1646 1677 nullid: bytes
1647 1678 """null revision for the hash function used by the repository."""
1648 1679
1649 1680 supported: set[bytes]
1650 1681 """Set of requirements that this repo is capable of opening."""
1651 1682
1652 1683 requirements: set[bytes]
1653 1684 """Set of requirements this repo uses."""
1654 1685
1655 1686 features: set[bytes]
1656 1687 """Set of "features" this repository supports.
1657 1688
1658 1689 A "feature" is a loosely-defined term. It can refer to a feature
1659 1690 in the classical sense or can describe an implementation detail
1660 1691 of the repository. For example, a ``readonly`` feature may denote
1661 1692 the repository as read-only. Or a ``revlogfilestore`` feature may
1662 1693 denote that the repository is using revlogs for file storage.
1663 1694
1664 1695 The intent of features is to provide a machine-queryable mechanism
1665 1696 for repo consumers to test for various repository characteristics.
1666 1697
1667 1698 Features are similar to ``requirements``. The main difference is that
1668 1699 requirements are stored on-disk and represent requirements to open the
1669 1700 repository. Features describe run-time capabilities of the repository
1670 1701 and are more granular (they may be derived from requirements).
1671 1702 """
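# Illustrative usage (a sketch, not mandated by this interface): consumers can
# branch on run-time capabilities via ``features`` rather than inspecting the
# on-disk ``requirements``, e.g.::
#
#     if REPO_FEATURE_REVLOG_FILE_STORAGE in repo.features:
#         ...  # fast path that assumes revlog-backed file storage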
1672 1703
1673 1704 filtername: bytes
1674 1705 """Name of the repoview that is active on this repo."""
1675 1706
1676 1707 vfs_map: Mapping[bytes, Vfs]
1677 1708 """a bytes-key → vfs mapping used by transaction and others"""
1678 1709
1679 1710 wvfs: Vfs
1680 1711 """VFS used to access the working directory."""
1681 1712
1682 1713 vfs: Vfs
1683 1714 """VFS rooted at the .hg directory.
1684 1715
1685 1716 Used to access repository data not in the store.
1686 1717 """
1687 1718
1688 1719 svfs: Vfs
1689 1720 """VFS rooted at the store.
1690 1721
1691 1722 Used to access repository data in the store. Typically .hg/store.
1692 1723 But can point elsewhere if the store is shared.
1693 1724 """
1694 1725
1695 1726 root: bytes
1696 1727 """Path to the root of the working directory."""
1697 1728
1698 1729 path: bytes
1699 1730 """Path to the .hg directory."""
1700 1731
1701 1732 origroot: bytes
1702 1733 """The filesystem path that was used to construct the repo."""
1703 1734
1704 1735 auditor: Any
1705 1736 """A pathauditor for the working directory.
1706 1737
1707 1738 This checks if a path refers to a nested repository.
1708 1739
1709 1740 Operates on the filesystem.
1710 1741 """
1711 1742
1712 1743 nofsauditor: Any # TODO: add type hints
1713 1744 """A pathauditor for the working directory.
1714 1745
1715 1746 This is like ``auditor`` except it doesn't do filesystem checks.
1716 1747 """
1717 1748
1718 1749 baseui: Ui
1719 1750 """Original ui instance passed into constructor."""
1720 1751
1721 1752 ui: Ui
1722 1753 """Main ui instance for this instance."""
1723 1754
1724 1755 sharedpath: bytes
1725 1756 """Path to the .hg directory of the repo this repo was shared from."""
1726 1757
1727 1758 store: Any # TODO: add type hints
1728 1759 """A store instance."""
1729 1760
1730 1761 spath: bytes
1731 1762 """Path to the store."""
1732 1763
1733 1764 sjoin: Callable # TODO: add type hints
1734 1765 """Alias to self.store.join."""
1735 1766
1736 1767 cachevfs: Vfs
1737 1768 """A VFS used to access the cache directory.
1738 1769
1739 1770 Typically .hg/cache.
1740 1771 """
1741 1772
1742 1773 wcachevfs: Vfs
1743 1774 """A VFS used to access the cache directory dedicated to the working copy.
1744 1775
1745 1776 Typically .hg/wcache.
1746 1777 """
1747 1778
1748 1779 filteredrevcache: Any # TODO: add type hints
1749 1780 """Holds sets of revisions to be filtered."""
1750 1781
1751 1782 names: Any # TODO: add type hints
1752 1783 """A ``namespaces`` instance."""
1753 1784
1754 1785 filecopiesmode: Any # TODO: add type hints
1755 1786 """The way file copies should be dealt with in this repo."""
1756 1787
1757 1788 @abc.abstractmethod
1758 1789 def close(self):
1759 1790 """Close the handle on this repository."""
1760 1791
1761 1792 @abc.abstractmethod
1762 1793 def peer(self, path=None):
1763 1794 """Obtain an object conforming to the ``peer`` interface."""
1764 1795
1765 1796 @abc.abstractmethod
1766 1797 def unfiltered(self):
1767 1798 """Obtain an unfiltered/raw view of this repo."""
1768 1799
1769 1800 @abc.abstractmethod
1770 1801 def filtered(self, name, visibilityexceptions=None):
1771 1802 """Obtain a named view of this repository."""
1772 1803
1773 1804 obsstore: Any # TODO: add type hints
1774 1805 """A store of obsolescence data."""
1775 1806
1776 1807 changelog: Any # TODO: add type hints
1777 1808 """A handle on the changelog revlog."""
1778 1809
1779 1810 manifestlog: imanifestlog
1780 1811 """An instance conforming to the ``imanifestlog`` interface.
1781 1812
1782 1813 Provides access to manifests for the repository.
1783 1814 """
1784 1815
1785 1816 dirstate: intdirstate.idirstate
1786 1817 """Working directory state."""
1787 1818
1788 1819 narrowpats: Any # TODO: add type hints
1789 1820 """Matcher patterns for this repository's narrowspec."""
1790 1821
1791 1822 @abc.abstractmethod
1792 1823 def narrowmatch(self, match=None, includeexact=False):
1793 1824 """Obtain a matcher for the narrowspec."""
1794 1825
1795 1826 @abc.abstractmethod
1796 1827 def setnarrowpats(self, newincludes, newexcludes):
1797 1828 """Define the narrowspec for this repository."""
1798 1829
1799 1830 @abc.abstractmethod
1800 1831 def __getitem__(self, changeid):
1801 1832 """Try to resolve a changectx."""
1802 1833
1803 1834 @abc.abstractmethod
1804 1835 def __contains__(self, changeid):
1805 1836 """Whether a changeset exists."""
1806 1837
1807 1838 @abc.abstractmethod
1808 1839 def __nonzero__(self):
1809 1840 """Always returns True."""
1810 1841 return True
1811 1842
1812 1843 __bool__ = __nonzero__
1813 1844
1814 1845 @abc.abstractmethod
1815 1846 def __len__(self):
1816 1847 """Returns the number of changesets in the repo."""
1817 1848
1818 1849 @abc.abstractmethod
1819 1850 def __iter__(self):
1820 1851 """Iterate over revisions in the changelog."""
1821 1852
1822 1853 @abc.abstractmethod
1823 1854 def revs(self, expr, *args):
1824 1855 """Evaluate a revset.
1825 1856
1826 1857 Emits revisions.
1827 1858 """
1828 1859
1829 1860 @abc.abstractmethod
1830 1861 def set(self, expr, *args):
1831 1862 """Evaluate a revset.
1832 1863
1833 1864 Emits changectx instances.
1834 1865 """
1835 1866
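# Illustrative usage (a sketch; the revset strings and arguments are examples
# only)::
#
#     revnums = repo.revs(b'heads(%ld)', some_revs)       # revision numbers
#     contexts = repo.set(b'ancestors(%ld)', some_revs)   # changectx objects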
1836 1867 @abc.abstractmethod
1837 1868 def anyrevs(self, specs, user=False, localalias=None):
1838 1869 """Find revisions matching one of the given revsets."""
1839 1870
1840 1871 @abc.abstractmethod
1841 1872 def url(self):
1842 1873 """Returns a string representing the location of this repo."""
1843 1874
1844 1875 @abc.abstractmethod
1845 1876 def hook(self, name, throw=False, **args):
1846 1877 """Call a hook."""
1847 1878
1848 1879 @abc.abstractmethod
1849 1880 def tags(self):
1850 1881 """Return a mapping of tag to node."""
1851 1882
1852 1883 @abc.abstractmethod
1853 1884 def tagtype(self, tagname):
1854 1885 """Return the type of a given tag."""
1855 1886
1856 1887 @abc.abstractmethod
1857 1888 def tagslist(self):
1858 1889 """Return a list of tags ordered by revision."""
1859 1890
1860 1891 @abc.abstractmethod
1861 1892 def nodetags(self, node):
1862 1893 """Return the tags associated with a node."""
1863 1894
1864 1895 @abc.abstractmethod
1865 1896 def nodebookmarks(self, node):
1866 1897 """Return the list of bookmarks pointing to the specified node."""
1867 1898
1868 1899 @abc.abstractmethod
1869 1900 def branchmap(self):
1870 1901 """Return a mapping of branch to heads in that branch."""
1871 1902
1872 1903 @abc.abstractmethod
1873 1904 def revbranchcache(self):
1874 1905 pass
1875 1906
1876 1907 @abc.abstractmethod
1877 1908 def register_changeset(self, rev, changelogrevision):
1878 1909 """Extension point for caches for new nodes.
1879 1910
1880 1911 Multiple consumers are expected to need parts of the changelogrevision,
1881 1912 so it is provided as an optimization to avoid duplicate lookups. A simple
1882 1913 cache would be fragile when other revisions are accessed, too."""
1883 1914 pass
1884 1915
1885 1916 @abc.abstractmethod
1886 1917 def branchtip(self, branchtip, ignoremissing=False):
1887 1918 """Return the tip node for a given branch."""
1888 1919
1889 1920 @abc.abstractmethod
1890 1921 def lookup(self, key):
1891 1922 """Resolve the node for a revision."""
1892 1923
1893 1924 @abc.abstractmethod
1894 1925 def lookupbranch(self, key):
1895 1926 """Look up the branch name of the given revision or branch name."""
1896 1927
1897 1928 @abc.abstractmethod
1898 1929 def known(self, nodes):
1899 1930 """Determine whether a series of nodes is known.
1900 1931
1901 1932 Returns a list of bools.
1902 1933 """
1903 1934
1904 1935 @abc.abstractmethod
1905 1936 def local(self):
1906 1937 """Whether the repository is local."""
1907 1938 return True
1908 1939
1909 1940 @abc.abstractmethod
1910 1941 def publishing(self):
1911 1942 """Whether the repository is a publishing repository."""
1912 1943
1913 1944 @abc.abstractmethod
1914 1945 def cancopy(self):
1915 1946 pass
1916 1947
1917 1948 @abc.abstractmethod
1918 1949 def shared(self):
1919 1950 """The type of shared repository or None."""
1920 1951
1921 1952 @abc.abstractmethod
1922 1953 def wjoin(self, f, *insidef):
1923 1954 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1924 1955
1925 1956 @abc.abstractmethod
1926 1957 def setparents(self, p1, p2):
1927 1958 """Set the parent nodes of the working directory."""
1928 1959
1929 1960 @abc.abstractmethod
1930 1961 def filectx(self, path, changeid=None, fileid=None):
1931 1962 """Obtain a filectx for the given file revision."""
1932 1963
1933 1964 @abc.abstractmethod
1934 1965 def getcwd(self):
1935 1966 """Obtain the current working directory from the dirstate."""
1936 1967
1937 1968 @abc.abstractmethod
1938 1969 def pathto(self, f, cwd=None):
1939 1970 """Obtain the relative path to a file."""
1940 1971
1941 1972 @abc.abstractmethod
1942 1973 def adddatafilter(self, name, fltr):
1943 1974 pass
1944 1975
1945 1976 @abc.abstractmethod
1946 1977 def wread(self, filename):
1947 1978 """Read a file from wvfs, using data filters."""
1948 1979
1949 1980 @abc.abstractmethod
1950 1981 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1951 1982 """Write data to a file in the wvfs, using data filters."""
1952 1983
1953 1984 @abc.abstractmethod
1954 1985 def wwritedata(self, filename, data):
1955 1986 """Resolve data for writing to the wvfs, using data filters."""
1956 1987
1957 1988 @abc.abstractmethod
1958 1989 def currenttransaction(self):
1959 1990 """Obtain the current transaction instance or None."""
1960 1991
1961 1992 @abc.abstractmethod
1962 1993 def transaction(self, desc, report=None):
1963 1994 """Open a new transaction to write to the repository."""
1964 1995
1965 1996 @abc.abstractmethod
1966 1997 def undofiles(self):
1967 1998 """Returns a list of (vfs, path) for files to undo transactions."""
1968 1999
1969 2000 @abc.abstractmethod
1970 2001 def recover(self):
1971 2002 """Roll back an interrupted transaction."""
1972 2003
1973 2004 @abc.abstractmethod
1974 2005 def rollback(self, dryrun=False, force=False):
1975 2006 """Undo the last transaction.
1976 2007
1977 2008 DANGEROUS.
1978 2009 """
1979 2010
1980 2011 @abc.abstractmethod
1981 2012 def updatecaches(self, tr=None, full=False, caches=None):
1982 2013 """Warm repo caches."""
1983 2014
1984 2015 @abc.abstractmethod
1985 2016 def invalidatecaches(self):
1986 2017 """Invalidate cached data due to the repository mutating."""
1987 2018
1988 2019 @abc.abstractmethod
1989 2020 def invalidatevolatilesets(self):
1990 2021 pass
1991 2022
1992 2023 @abc.abstractmethod
1993 2024 def invalidatedirstate(self):
1994 2025 """Invalidate the dirstate."""
1995 2026
1996 2027 @abc.abstractmethod
1997 2028 def invalidate(self, clearfilecache=False):
1998 2029 pass
1999 2030
2000 2031 @abc.abstractmethod
2001 2032 def invalidateall(self):
2002 2033 pass
2003 2034
2004 2035 @abc.abstractmethod
2005 2036 def lock(self, wait=True):
2006 2037 """Lock the repository store and return a lock instance."""
2007 2038
2008 2039 @abc.abstractmethod
2009 2040 def currentlock(self):
2010 2041 """Return the lock if it's held or None."""
2011 2042
2012 2043 @abc.abstractmethod
2013 2044 def wlock(self, wait=True):
2014 2045 """Lock the non-store parts of the repository."""
2015 2046
2016 2047 @abc.abstractmethod
2017 2048 def currentwlock(self):
2018 2049 """Return the wlock if it's held or None."""
2019 2050
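# Illustrative usage (a sketch of a common locking pattern; the ordering is an
# assumption of this sketch, not something this interface mandates): take the
# working-copy lock before the store lock and open the transaction while the
# store lock is held, e.g.::
#
#     with repo.wlock(), repo.lock():
#         with repo.transaction(b'example-change') as tr:
#             ...  # mutate the repository under ``tr``
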
2020 2051 @abc.abstractmethod
2021 2052 def checkcommitpatterns(self, wctx, match, status, fail):
2022 2053 pass
2023 2054
2024 2055 @abc.abstractmethod
2025 2056 def commit(
2026 2057 self,
2027 2058 text=b'',
2028 2059 user=None,
2029 2060 date=None,
2030 2061 match=None,
2031 2062 force=False,
2032 2063 editor=False,
2033 2064 extra=None,
2034 2065 ):
2035 2066 """Add a new revision to the repository."""
2036 2067
2037 2068 @abc.abstractmethod
2038 2069 def commitctx(self, ctx, error=False, origctx=None):
2039 2070 """Commit a commitctx instance to the repository."""
2040 2071
2041 2072 @abc.abstractmethod
2042 2073 def destroying(self):
2043 2074 """Inform the repository that nodes are about to be destroyed."""
2044 2075
2045 2076 @abc.abstractmethod
2046 2077 def destroyed(self):
2047 2078 """Inform the repository that nodes have been destroyed."""
2048 2079
2049 2080 @abc.abstractmethod
2050 2081 def status(
2051 2082 self,
2052 2083 node1=b'.',
2053 2084 node2=None,
2054 2085 match=None,
2055 2086 ignored=False,
2056 2087 clean=False,
2057 2088 unknown=False,
2058 2089 listsubrepos=False,
2059 2090 ):
2060 2091 """Convenience method to call repo[x].status()."""
2061 2092
2062 2093 @abc.abstractmethod
2063 2094 def addpostdsstatus(self, ps):
2064 2095 pass
2065 2096
2066 2097 @abc.abstractmethod
2067 2098 def postdsstatus(self):
2068 2099 pass
2069 2100
2070 2101 @abc.abstractmethod
2071 2102 def clearpostdsstatus(self):
2072 2103 pass
2073 2104
2074 2105 @abc.abstractmethod
2075 2106 def heads(self, start=None):
2076 2107 """Obtain list of nodes that are DAG heads."""
2077 2108
2078 2109 @abc.abstractmethod
2079 2110 def branchheads(self, branch=None, start=None, closed=False):
2080 2111 pass
2081 2112
2082 2113 @abc.abstractmethod
2083 2114 def branches(self, nodes):
2084 2115 pass
2085 2116
2086 2117 @abc.abstractmethod
2087 2118 def between(self, pairs):
2088 2119 pass
2089 2120
2090 2121 @abc.abstractmethod
2091 2122 def checkpush(self, pushop):
2092 2123 pass
2093 2124
2094 2125 prepushoutgoinghooks: util.hooks
2095 2126 """util.hooks instance."""
2096 2127
2097 2128 @abc.abstractmethod
2098 2129 def pushkey(self, namespace, key, old, new):
2099 2130 pass
2100 2131
2101 2132 @abc.abstractmethod
2102 2133 def listkeys(self, namespace):
2103 2134 pass
2104 2135
2105 2136 @abc.abstractmethod
2106 2137 def debugwireargs(self, one, two, three=None, four=None, five=None):
2107 2138 pass
2108 2139
2109 2140 @abc.abstractmethod
2110 2141 def savecommitmessage(self, text):
2111 2142 pass
2112 2143
2113 2144 @abc.abstractmethod
2114 2145 def register_sidedata_computer(
2115 2146 self, kind, category, keys, computer, flags, replace=False
2116 2147 ):
2117 2148 pass
2118 2149
2119 2150 @abc.abstractmethod
2120 2151 def register_wanted_sidedata(self, category):
2121 2152 pass
2122 2153
2123 2154
2124 2155 class completelocalrepository(
2125 2156 ilocalrepositorymain, ilocalrepositoryfilestorage
2126 2157 ):
2127 2158 """Complete interface for a local repository."""
2128 2159
2129 2160
2130 2161 class iwireprotocolcommandcacher(Protocol):
2131 2162 """Represents a caching backend for wire protocol commands.
2132 2163
2133 2164 Wire protocol version 2 supports transparent caching of many commands.
2134 2165 To leverage this caching, servers can activate objects that cache
2135 2166 command responses. Objects handle both cache writing and reading.
2136 2167 This interface defines how that response caching mechanism works.
2137 2168
2138 2169 Wire protocol version 2 commands emit a series of objects that are
2139 2170 serialized and sent to the client. The caching layer exists between
2140 2171 the invocation of the command function and the sending of its output
2141 2172 objects to an output layer.
2142 2173
2143 2174 Instances of this interface represent a binding to a cache that
2144 2175 can serve a response (in place of calling a command function) and/or
2145 2176 write responses to a cache for subsequent use.
2146 2177
2147 2178 When a command request arrives, the following happens with regard
2148 2179 to this interface:
2149 2180
2150 2181 1. The server determines whether the command request is cacheable.
2151 2182 2. If it is, an instance of this interface is spawned.
2152 2183 3. The cacher is activated in a context manager (``__enter__`` is called).
2153 2184 4. A cache *key* for that request is derived. This will call the
2154 2185 instance's ``adjustcachekeystate()`` method so the derivation
2155 2186 can be influenced.
2156 2187 5. The cacher is informed of the derived cache key via a call to
2157 2188 ``setcachekey()``.
2158 2189 6. The cacher's ``lookup()`` method is called to test for presence of
2159 2190 the derived key in the cache.
2160 2191 7. If ``lookup()`` returns a hit, that cached result is used in place
2161 2192 of invoking the command function. ``__exit__`` is called and the instance
2162 2193 is discarded.
2163 2194 8. The command function is invoked.
2164 2195 9. ``onobject()`` is called for each object emitted by the command
2165 2196 function.
2166 2197 10. After the final object is seen, ``onfinished()`` is called.
2167 2198 11. ``__exit__`` is called to signal the end of use of the instance.
2168 2199
2169 2200 Cache *key* derivation can be influenced by the instance.
2170 2201
2171 2202 Cache keys are initially derived by a deterministic representation of
2172 2203 the command request. This includes the command name, arguments, protocol
2173 2204 version, etc. This initial key derivation is performed by CBOR-encoding a
2174 2205 data structure and feeding that output into a hasher.
2175 2206
2176 2207 Instances of this interface can influence this initial key derivation
2177 2208 via ``adjustcachekeystate()``.
2178 2209
2179 2210 The instance is informed of the derived cache key via a call to
2180 2211 ``setcachekey()``. The instance must store the key locally so it can
2181 2212 be consulted on subsequent operations that may require it.
2182 2213
2183 2214 When constructed, the instance has access to a callable that can be used
2184 2215 for encoding response objects. This callable receives as its single
2185 2216 argument an object emitted by a command function. It returns an iterable
2186 2217 of bytes chunks representing the encoded object. Unless the cacher is
2187 2218 caching native Python objects in memory or has a way of reconstructing
2188 2219 the original Python objects, implementations typically call this function
2189 2220 to produce bytes from the output objects and then store those bytes in
2190 2221 the cache. When it comes time to re-emit those bytes, they are wrapped
2191 2222 in a ``wireprototypes.encodedresponse`` instance to tell the output
2192 2223 layer that they are pre-encoded.
2193 2224
2194 2225 When receiving the objects emitted by the command function, instances
2195 2226 can choose what to do with those objects. The simplest thing to do is
2196 2227 re-emit the original objects. They will be forwarded to the output
2197 2228 layer and will be processed as if the cacher did not exist.
2198 2229
2199 2230 Implementations could also choose to not emit objects - instead locally
2200 2231 buffering objects or their encoded representation. They could then emit
2201 2232 a single "coalesced" object when ``onfinished()`` is called. In
2202 2233 this way, the implementation would function as a filtering layer of
2203 2234 sorts.
2204 2235
2205 2236 When caching objects, typically the encoded form of the object will
2206 2237 be stored. Keep in mind that if the original object is forwarded to
2207 2238 the output layer, it will need to be encoded there as well. For large
2208 2239 output, this redundant encoding could add overhead. Implementations
2209 2240 could wrap the encoded object data in ``wireprototypes.encodedresponse``
2210 2241 instances to avoid this overhead.
2211 2242 """
2212 2243
2213 2244 def __enter__(self):
2214 2245 """Marks the instance as active.
2215 2246
2216 2247 Should return self.
2217 2248 """
2218 2249
2219 2250 def __exit__(self, exctype, excvalue, exctb):
2220 2251 """Called when cacher is no longer used.
2221 2252
2222 2253 This can be used by implementations to perform cleanup actions (e.g.
2223 2254 disconnecting network sockets, aborting a partially cached response).
2224 2255 """
2225 2256
2226 2257 def adjustcachekeystate(self, state):
2227 2258 """Influences cache key derivation by adjusting state to derive key.
2228 2259
2229 2260 A dict defining the state used to derive the cache key is passed.
2230 2261
2231 2262 Implementations can modify this dict to record additional state that
2232 2263 is wanted to influence key derivation.
2233 2264
2234 2265 Implementations are *highly* encouraged to not modify or delete
2235 2266 existing keys.
2236 2267 """
2237 2268
2238 2269 def setcachekey(self, key):
2239 2270 """Record the derived cache key for this request.
2240 2271
2241 2272 Instances may mutate the key for internal usage, as desired. e.g.
2242 2273 instances may wish to prepend the repo name, introduce path
2243 2274 components for filesystem or URL addressing, etc. Behavior is up to
2244 2275 the cache.
2245 2276
2246 2277 Returns a bool indicating if the request is cacheable by this
2247 2278 instance.
2248 2279 """
2249 2280
2250 2281 def lookup(self):
2251 2282 """Attempt to resolve an entry in the cache.
2252 2283
2253 2284 The instance is instructed to look for the cache key that it was
2254 2285 informed about via the call to ``setcachekey()``.
2255 2286
2256 2287 If there's no cache hit or the cacher doesn't wish to use the cached
2257 2288 entry, ``None`` should be returned.
2258 2289
2259 2290 Else, a dict defining the cached result should be returned. The
2260 2291 dict may have the following keys:
2261 2292
2262 2293 objs
2263 2294 An iterable of objects that should be sent to the client. That
2264 2295 iterable of objects is expected to be what the command function
2265 2296 would return if invoked or an equivalent representation thereof.
2266 2297 """
2267 2298
2268 2299 def onobject(self, obj):
2269 2300 """Called when a new object is emitted from the command function.
2270 2301
2271 2302 Receives as its argument the object that was emitted from the
2272 2303 command function.
2273 2304
2274 2305 This method returns an iterator of objects to forward to the output
2275 2306 layer. The easiest implementation is a generator that just
2276 2307 ``yield obj``.
2277 2308 """
2278 2309
2279 2310 def onfinished(self):
2280 2311 """Called after all objects have been emitted from the command function.
2281 2312
2282 2313 Implementations should return an iterator of objects to forward to
2283 2314 the output layer.
2284 2315
2285 2316 This method can be a generator.
2286 2317 """
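
# A minimal illustrative sketch (not part of mercurial): an in-memory cacher
# following the request sequence documented in ``iwireprotocolcommandcacher``
# above.  The module-level dict, the decision to treat every request as
# cacheable, and the ``b'objs'`` result key are assumptions of this sketch.
class _sketchmemorycacher:
    _cache = {}  # cache key -> list of previously emitted objects

    def __init__(self):
        self._key = None
        self._buffered = []

    def __enter__(self):
        return self  # activation returns self, per the interface

    def __exit__(self, exctype, excvalue, exctb):
        self._buffered = None  # drop any partially collected output

    def adjustcachekeystate(self, state):
        pass  # accept the default cache key derivation state unchanged

    def setcachekey(self, key):
        self._key = key
        return True  # this sketch considers every request cacheable

    def lookup(self):
        if self._key not in self._cache:
            return None  # miss: the command function will be invoked
        return {b'objs': iter(self._cache[self._key])}

    def onobject(self, obj):
        self._buffered.append(obj)
        yield obj  # forward the original object to the output layer

    def onfinished(self):
        self._cache[self._key] = list(self._buffered)
        return []  # everything was already forwarded from onobject()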