undo-files: factor the vfs map in a repository property...
marmoute -
r51189:f3488731 stable
@@ -1,2058 +1,2062 b''
1 1 # repository.py - Interfaces and base classes for repositories and peers.
2 2 # coding: utf-8
3 3 #
4 4 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9
10 10 from ..i18n import _
11 11 from .. import error
12 12 from . import util as interfaceutil
13 13
14 14 # Local repository feature string.
15 15
16 16 # Revlogs are being used for file storage.
17 17 REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
18 18 # The storage part of the repository is shared from an external source.
19 19 REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
20 20 # LFS supported for backing file storage.
21 21 REPO_FEATURE_LFS = b'lfs'
22 22 # Repository supports being stream cloned.
23 23 REPO_FEATURE_STREAM_CLONE = b'streamclone'
24 24 # Repository supports (at least) some sidedata to be stored
25 25 REPO_FEATURE_SIDE_DATA = b'side-data'
26 26 # Files storage may lack data for all ancestors.
27 27 REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'
28 28
29 29 REVISION_FLAG_CENSORED = 1 << 15
30 30 REVISION_FLAG_ELLIPSIS = 1 << 14
31 31 REVISION_FLAG_EXTSTORED = 1 << 13
32 32 REVISION_FLAG_HASCOPIESINFO = 1 << 12
33 33
34 34 REVISION_FLAGS_KNOWN = (
35 35 REVISION_FLAG_CENSORED
36 36 | REVISION_FLAG_ELLIPSIS
37 37 | REVISION_FLAG_EXTSTORED
38 38 | REVISION_FLAG_HASCOPIESINFO
39 39 )
40 40
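# Illustrative sketch (not part of the upstream file): how the flag
# constants above combine; ``delta`` is any object exposing the
# ``irevisiondelta`` interface defined later in this module.
def _example_check_flags(delta):
    unknown = delta.flags & ~REVISION_FLAGS_KNOWN
    if unknown:
        raise ValueError('unsupported revision flags: %d' % unknown)
    return bool(delta.flags & REVISION_FLAG_CENSORED)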
41 41 CG_DELTAMODE_STD = b'default'
42 42 CG_DELTAMODE_PREV = b'previous'
43 43 CG_DELTAMODE_FULL = b'fulltext'
44 44 CG_DELTAMODE_P1 = b'p1'
45 45
46 46
47 47 ## Cache related constants:
48 48 #
49 49 # Used to control which cache should be warmed in a repo.updatecaches(…) call.
50 50
51 51 # Warm branchmaps of all known repoview's filter-level
52 52 CACHE_BRANCHMAP_ALL = b"branchmap-all"
53 53 # Warm branchmaps of repoview's filter-level used by server
54 54 CACHE_BRANCHMAP_SERVED = b"branchmap-served"
55 55 # Warm internal changelog cache (eg: persistent nodemap)
56 56 CACHE_CHANGELOG_CACHE = b"changelog-cache"
57 57 # Warm full manifest cache
58 58 CACHE_FULL_MANIFEST = b"full-manifest"
59 59 # Warm file-node-tags cache
60 60 CACHE_FILE_NODE_TAGS = b"file-node-tags"
61 61 # Warm internal manifestlog cache (eg: persistent nodemap)
62 62 CACHE_MANIFESTLOG_CACHE = b"manifestlog-cache"
63 63 # Warm rev branch cache
64 64 CACHE_REV_BRANCH = b"rev-branch-cache"
65 65 # Warm tags' cache for default repoview
66 66 CACHE_TAGS_DEFAULT = b"tags-default"
67 67 # Warm tags' cache for repoview's filter-level used by server
68 68 CACHE_TAGS_SERVED = b"tags-served"
69 69
70 70 # the cache to warm by default after a simple transaction
71 71 # (this is a mutable set to let extension update it)
72 72 CACHES_DEFAULT = {
73 73 CACHE_BRANCHMAP_SERVED,
74 74 }
75 75
76 76 # the caches to warm when warming all of them
77 77 # (this is a mutable set to let extension update it)
78 78 CACHES_ALL = {
79 79 CACHE_BRANCHMAP_SERVED,
80 80 CACHE_BRANCHMAP_ALL,
81 81 CACHE_CHANGELOG_CACHE,
82 82 CACHE_FILE_NODE_TAGS,
83 83 CACHE_FULL_MANIFEST,
84 84 CACHE_MANIFESTLOG_CACHE,
85 85 CACHE_TAGS_DEFAULT,
86 86 CACHE_TAGS_SERVED,
87 87 }
88 88
89 89 # the cache to warm by default on simple call
90 90 # (this is a mutable set to let extension update it)
91 91 CACHES_POST_CLONE = CACHES_ALL.copy()
92 92 CACHES_POST_CLONE.discard(CACHE_FILE_NODE_TAGS)
93 93
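# Hypothetical extension sketch: the CACHES_* sets above are deliberately
# mutable so an extension can register its own cache name. The name below
# is invented for illustration.
CACHE_MY_EXT = b"my-ext-cache"
CACHES_DEFAULT.add(CACHE_MY_EXT)
CACHES_ALL.add(CACHE_MY_EXT)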
94 94
95 95 class ipeerconnection(interfaceutil.Interface):
96 96 """Represents a "connection" to a repository.
97 97
98 98 This is the base interface for representing a connection to a repository.
99 99 It holds basic properties and methods applicable to all peer types.
100 100
101 101 This is not a complete interface definition and should not be used
102 102 outside of this module.
103 103 """
104 104
105 105 ui = interfaceutil.Attribute("""ui.ui instance""")
106 106 path = interfaceutil.Attribute("""a urlutil.path instance or None""")
107 107
108 108 def url():
109 109 """Returns a URL string representing this peer.
110 110
111 111 Currently, implementations expose the raw URL used to construct the
112 112 instance. It may contain credentials as part of the URL. The
113 113 expectations of the value aren't well-defined and this could lead to
114 114 data leakage.
115 115
116 116 TODO audit/clean consumers and more clearly define the contents of this
117 117 value.
118 118 """
119 119
120 120 def local():
121 121 """Returns a local repository instance.
122 122
123 123 If the peer represents a local repository, returns an object that
124 124 can be used to interface with it. Otherwise returns ``None``.
125 125 """
126 126
127 127 def canpush():
128 128 """Returns a boolean indicating if this peer can be pushed to."""
129 129
130 130 def close():
131 131 """Close the connection to this peer.
132 132
133 133 This is called when the peer will no longer be used. Resources
134 134 associated with the peer should be cleaned up.
135 135 """
136 136
137 137
138 138 class ipeercapabilities(interfaceutil.Interface):
139 139 """Peer sub-interface related to capabilities."""
140 140
141 141 def capable(name):
142 142 """Determine support for a named capability.
143 143
144 144 Returns ``False`` if capability not supported.
145 145
146 146 Returns ``True`` if boolean capability is supported. Returns a string
147 147 if capability support is non-boolean.
148 148
149 149 Capability strings may or may not map to wire protocol capabilities.
150 150 """
151 151
152 152 def requirecap(name, purpose):
153 153 """Require a capability to be present.
154 154
155 155 Raises a ``CapabilityError`` if the capability isn't present.
156 156 """
157 157
158 158
159 159 class ipeercommands(interfaceutil.Interface):
160 160 """Client-side interface for communicating over the wire protocol.
161 161
162 162 This interface is used as a gateway to the Mercurial wire protocol.
163 163     Methods commonly call wire protocol commands of the same name.
164 164 """
165 165
166 166 def branchmap():
167 167 """Obtain heads in named branches.
168 168
169 169 Returns a dict mapping branch name to an iterable of nodes that are
170 170 heads on that branch.
171 171 """
172 172
173 173 def capabilities():
174 174 """Obtain capabilities of the peer.
175 175
176 176 Returns a set of string capabilities.
177 177 """
178 178
179 179 def clonebundles():
180 180 """Obtains the clone bundles manifest for the repo.
181 181
182 182 Returns the manifest as unparsed bytes.
183 183 """
184 184
185 185 def debugwireargs(one, two, three=None, four=None, five=None):
186 186 """Used to facilitate debugging of arguments passed over the wire."""
187 187
188 188 def getbundle(source, **kwargs):
189 189 """Obtain remote repository data as a bundle.
190 190
191 191 This command is how the bulk of repository data is transferred from
192 192 the peer to the local repository
193 193
194 194 Returns a generator of bundle data.
195 195 """
196 196
197 197 def heads():
198 198 """Determine all known head revisions in the peer.
199 199
200 200 Returns an iterable of binary nodes.
201 201 """
202 202
203 203 def known(nodes):
204 204 """Determine whether multiple nodes are known.
205 205
206 206 Accepts an iterable of nodes whose presence to check for.
207 207
208 208         Returns an iterable of booleans indicating whether the corresponding node
209 209 at that index is known to the peer.
210 210 """
211 211
212 212 def listkeys(namespace):
213 213 """Obtain all keys in a pushkey namespace.
214 214
215 215 Returns an iterable of key names.
216 216 """
217 217
218 218 def lookup(key):
219 219 """Resolve a value to a known revision.
220 220
221 221 Returns a binary node of the resolved revision on success.
222 222 """
223 223
224 224 def pushkey(namespace, key, old, new):
225 225 """Set a value using the ``pushkey`` protocol.
226 226
227 227 Arguments correspond to the pushkey namespace and key to operate on and
228 228 the old and new values for that key.
229 229
230 230 Returns a string with the peer result. The value inside varies by the
231 231 namespace.
232 232 """
233 233
234 234 def stream_out():
235 235 """Obtain streaming clone data.
236 236
237 237 Successful result should be a generator of data chunks.
238 238 """
239 239
240 240 def unbundle(bundle, heads, url):
241 241 """Transfer repository data to the peer.
242 242
243 243 This is how the bulk of data during a push is transferred.
244 244
245 245 Returns the integer number of heads added to the peer.
246 246 """
247 247
248 248
249 249 class ipeerlegacycommands(interfaceutil.Interface):
250 250 """Interface for implementing support for legacy wire protocol commands.
251 251
252 252 Wire protocol commands transition to legacy status when they are no longer
253 253 used by modern clients. To facilitate identifying which commands are
254 254 legacy, the interfaces are split.
255 255 """
256 256
257 257 def between(pairs):
258 258 """Obtain nodes between pairs of nodes.
259 259
260 260 ``pairs`` is an iterable of node pairs.
261 261
262 262 Returns an iterable of iterables of nodes corresponding to each
263 263 requested pair.
264 264 """
265 265
266 266 def branches(nodes):
267 267 """Obtain ancestor changesets of specific nodes back to a branch point.
268 268
269 269 For each requested node, the peer finds the first ancestor node that is
270 270 a DAG root or is a merge.
271 271
272 272 Returns an iterable of iterables with the resolved values for each node.
273 273 """
274 274
275 275 def changegroup(nodes, source):
276 276 """Obtain a changegroup with data for descendants of specified nodes."""
277 277
278 278 def changegroupsubset(bases, heads, source):
279 279 pass
280 280
281 281
282 282 class ipeercommandexecutor(interfaceutil.Interface):
283 283 """Represents a mechanism to execute remote commands.
284 284
285 285 This is the primary interface for requesting that wire protocol commands
286 286 be executed. Instances of this interface are active in a context manager
287 287 and have a well-defined lifetime. When the context manager exits, all
288 288 outstanding requests are waited on.
289 289 """
290 290
291 291 def callcommand(name, args):
292 292 """Request that a named command be executed.
293 293
294 294 Receives the command name and a dictionary of command arguments.
295 295
296 296 Returns a ``concurrent.futures.Future`` that will resolve to the
297 297 result of that command request. That exact value is left up to
298 298 the implementation and possibly varies by command.
299 299
300 300 Not all commands can coexist with other commands in an executor
301 301 instance: it depends on the underlying wire protocol transport being
302 302 used and the command itself.
303 303
304 304 Implementations MAY call ``sendcommands()`` automatically if the
305 305 requested command can not coexist with other commands in this executor.
306 306
307 307 Implementations MAY call ``sendcommands()`` automatically when the
308 308 future's ``result()`` is called. So, consumers using multiple
309 309 commands with an executor MUST ensure that ``result()`` is not called
310 310 until all command requests have been issued.
311 311 """
312 312
313 313 def sendcommands():
314 314 """Trigger submission of queued command requests.
315 315
316 316 Not all transports submit commands as soon as they are requested to
317 317 run. When called, this method forces queued command requests to be
318 318 issued. It will no-op if all commands have already been sent.
319 319
320 320 When called, no more new commands may be issued with this executor.
321 321 """
322 322
323 323 def close():
324 324 """Signal that this command request is finished.
325 325
326 326 When called, no more new commands may be issued. All outstanding
327 327 commands that have previously been issued are waited on before
328 328 returning. This not only includes waiting for the futures to resolve,
329 329 but also waiting for all response data to arrive. In other words,
330 330 calling this waits for all on-wire state for issued command requests
331 331 to finish.
332 332
333 333 When used as a context manager, this method is called when exiting the
334 334 context manager.
335 335
336 336 This method may call ``sendcommands()`` if there are buffered commands.
337 337 """
338 338
339 339
340 340 class ipeerrequests(interfaceutil.Interface):
341 341 """Interface for executing commands on a peer."""
342 342
343 343 limitedarguments = interfaceutil.Attribute(
344 344         """True if the peer cannot receive large argument values for commands."""
345 345 )
346 346
347 347 def commandexecutor():
348 348 """A context manager that resolves to an ipeercommandexecutor.
349 349
350 350 The object this resolves to can be used to issue command requests
351 351 to the peer.
352 352
353 353 Callers should call its ``callcommand`` method to issue command
354 354 requests.
355 355
356 356 A new executor should be obtained for each distinct set of commands
357 357 (possibly just a single command) that the consumer wants to execute
358 358 as part of a single operation or round trip. This is because some
359 359 peers are half-duplex and/or don't support persistent connections.
360 360 e.g. in the case of HTTP peers, commands sent to an executor represent
361 361 a single HTTP request. While some peers may support multiple command
362 362 sends over the wire per executor, consumers need to code to the least
363 363 capable peer. So it should be assumed that command executors buffer
364 364 called commands until they are told to send them and that each
365 365 command executor could result in a new connection or wire-level request
366 366 being issued.
367 367 """
368 368
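# Usage sketch for the executor pattern described above (not part of the
# upstream file); ``peer`` is any object conforming to ``ipeerrequests`` and
# the command name/arguments are illustrative.
def _example_lookup_tip(peer):
    with peer.commandexecutor() as executor:
        return executor.callcommand(b'lookup', {b'key': b'tip'}).result()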
369 369
370 370 class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
371 371 """Unified interface for peer repositories.
372 372
373 373 All peer instances must conform to this interface.
374 374 """
375 375
376 376
377 377 class ipeerv2(ipeerconnection, ipeercapabilities, ipeerrequests):
378 378 """Unified peer interface for wire protocol version 2 peers."""
379 379
380 380 apidescriptor = interfaceutil.Attribute(
381 381 """Data structure holding description of server API."""
382 382 )
383 383
384 384
385 385 @interfaceutil.implementer(ipeerbase)
386 386 class peer:
387 387 """Base class for peer repositories."""
388 388
389 389 limitedarguments = False
390 390
391 391 def __init__(self, ui, path=None):
392 392 self.ui = ui
393 393 self.path = path
394 394
395 395 def capable(self, name):
396 396 caps = self.capabilities()
397 397 if name in caps:
398 398 return True
399 399
400 400 name = b'%s=' % name
401 401 for cap in caps:
402 402 if cap.startswith(name):
403 403 return cap[len(name) :]
404 404
405 405 return False
406 406
407 407 def requirecap(self, name, purpose):
408 408 if self.capable(name):
409 409 return
410 410
411 411 raise error.CapabilityError(
412 412 _(
413 413 b'cannot %s; remote repository does not support the '
414 414 b'\'%s\' capability'
415 415 )
416 416 % (purpose, name)
417 417 )
418 418
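# Usage sketch (not part of the upstream file): boolean capabilities come
# back as True/False, ``name=value`` capabilities come back as the value
# suffix. The capability names are invented for illustration.
def _example_capabilities(peer):
    if not peer.capable(b'some-boolean-cap'):
        return None  # capability absent
    value = peer.capable(b'some-valued-cap')
    if value not in (True, False):
        pass  # ``some-valued-cap=<value>`` advertised; ``value`` holds <value>
    peer.requirecap(b'some-boolean-cap', b'complete this operation')
    return value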
419 419
420 420 class iverifyproblem(interfaceutil.Interface):
421 421 """Represents a problem with the integrity of the repository.
422 422
423 423 Instances of this interface are emitted to describe an integrity issue
424 424 with a repository (e.g. corrupt storage, missing data, etc).
425 425
426 426 Instances are essentially messages associated with severity.
427 427 """
428 428
429 429 warning = interfaceutil.Attribute(
430 430 """Message indicating a non-fatal problem."""
431 431 )
432 432
433 433 error = interfaceutil.Attribute("""Message indicating a fatal problem.""")
434 434
435 435 node = interfaceutil.Attribute(
436 436 """Revision encountering the problem.
437 437
438 438 ``None`` means the problem doesn't apply to a single revision.
439 439 """
440 440 )
441 441
442 442
443 443 class irevisiondelta(interfaceutil.Interface):
444 444 """Represents a delta between one revision and another.
445 445
446 446 Instances convey enough information to allow a revision to be exchanged
447 447 with another repository.
448 448
449 449 Instances represent the fulltext revision data or a delta against
450 450 another revision. Therefore the ``revision`` and ``delta`` attributes
451 451 are mutually exclusive.
452 452
453 453 Typically used for changegroup generation.
454 454 """
455 455
456 456 node = interfaceutil.Attribute("""20 byte node of this revision.""")
457 457
458 458 p1node = interfaceutil.Attribute(
459 459 """20 byte node of 1st parent of this revision."""
460 460 )
461 461
462 462 p2node = interfaceutil.Attribute(
463 463 """20 byte node of 2nd parent of this revision."""
464 464 )
465 465
466 466 linknode = interfaceutil.Attribute(
467 467 """20 byte node of the changelog revision this node is linked to."""
468 468 )
469 469
470 470 flags = interfaceutil.Attribute(
471 471 """2 bytes of integer flags that apply to this revision.
472 472
473 473 This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
474 474 """
475 475 )
476 476
477 477 basenode = interfaceutil.Attribute(
478 478 """20 byte node of the revision this data is a delta against.
479 479
480 480 ``nullid`` indicates that the revision is a full revision and not
481 481 a delta.
482 482 """
483 483 )
484 484
485 485 baserevisionsize = interfaceutil.Attribute(
486 486 """Size of base revision this delta is against.
487 487
488 488 May be ``None`` if ``basenode`` is ``nullid``.
489 489 """
490 490 )
491 491
492 492 revision = interfaceutil.Attribute(
493 493 """Raw fulltext of revision data for this node."""
494 494 )
495 495
496 496 delta = interfaceutil.Attribute(
497 497 """Delta between ``basenode`` and ``node``.
498 498
499 499 Stored in the bdiff delta format.
500 500 """
501 501 )
502 502
503 503 sidedata = interfaceutil.Attribute(
504 504 """Raw sidedata bytes for the given revision."""
505 505 )
506 506
507 507 protocol_flags = interfaceutil.Attribute(
508 508 """Single byte of integer flags that can influence the protocol.
509 509
510 510 This is a bitwise composition of the ``storageutil.CG_FLAG*`` constants.
511 511 """
512 512 )
513 513
514 514
515 515 class ifilerevisionssequence(interfaceutil.Interface):
516 516 """Contains index data for all revisions of a file.
517 517
518 518 Types implementing this behave like lists of tuples. The index
519 519 in the list corresponds to the revision number. The values contain
520 520 index metadata.
521 521
522 522 The *null* revision (revision number -1) is always the last item
523 523 in the index.
524 524 """
525 525
526 526 def __len__():
527 527 """The total number of revisions."""
528 528
529 529 def __getitem__(rev):
530 530 """Returns the object having a specific revision number.
531 531
532 532 Returns an 8-tuple with the following fields:
533 533
534 534 offset+flags
535 535 Contains the offset and flags for the revision. 64-bit unsigned
536 536             integer where the first 6 bytes are the offset and the next 2 bytes
537 537 are flags. The offset can be 0 if it is not used by the store.
538 538 compressed size
539 539 Size of the revision data in the store. It can be 0 if it isn't
540 540 needed by the store.
541 541 uncompressed size
542 542 Fulltext size. It can be 0 if it isn't needed by the store.
543 543 base revision
544 544 Revision number of revision the delta for storage is encoded
545 545 against. -1 indicates not encoded against a base revision.
546 546 link revision
547 547 Revision number of changelog revision this entry is related to.
548 548 p1 revision
549 549 Revision number of 1st parent. -1 if no 1st parent.
550 550 p2 revision
551 551             Revision number of 2nd parent. -1 if no 2nd parent.
552 552 node
553 553 Binary node value for this revision number.
554 554
555 555 Negative values should index off the end of the sequence. ``-1``
556 556 should return the null revision. ``-2`` should return the most
557 557 recent revision.
558 558 """
559 559
560 560 def __contains__(rev):
561 561 """Whether a revision number exists."""
562 562
563 563 def insert(self, i, entry):
564 564 """Add an item to the index at specific revision."""
565 565
566 566
567 567 class ifileindex(interfaceutil.Interface):
568 568 """Storage interface for index data of a single file.
569 569
570 570 File storage data is divided into index metadata and data storage.
571 571 This interface defines the index portion of the interface.
572 572
573 573 The index logically consists of:
574 574
575 575 * A mapping between revision numbers and nodes.
576 576 * DAG data (storing and querying the relationship between nodes).
577 577 * Metadata to facilitate storage.
578 578 """
579 579
580 580 nullid = interfaceutil.Attribute(
581 581 """node for the null revision for use as delta base."""
582 582 )
583 583
584 584 def __len__():
585 585 """Obtain the number of revisions stored for this file."""
586 586
587 587 def __iter__():
588 588 """Iterate over revision numbers for this file."""
589 589
590 590 def hasnode(node):
591 591 """Returns a bool indicating if a node is known to this store.
592 592
593 593 Implementations must only return True for full, binary node values:
594 594 hex nodes, revision numbers, and partial node matches must be
595 595 rejected.
596 596
597 597 The null node is never present.
598 598 """
599 599
600 600 def revs(start=0, stop=None):
601 601 """Iterate over revision numbers for this file, with control."""
602 602
603 603 def parents(node):
604 604 """Returns a 2-tuple of parent nodes for a revision.
605 605
606 606 Values will be ``nullid`` if the parent is empty.
607 607 """
608 608
609 609 def parentrevs(rev):
610 610 """Like parents() but operates on revision numbers."""
611 611
612 612 def rev(node):
613 613 """Obtain the revision number given a node.
614 614
615 615 Raises ``error.LookupError`` if the node is not known.
616 616 """
617 617
618 618 def node(rev):
619 619 """Obtain the node value given a revision number.
620 620
621 621 Raises ``IndexError`` if the node is not known.
622 622 """
623 623
624 624 def lookup(node):
625 625 """Attempt to resolve a value to a node.
626 626
627 627 Value can be a binary node, hex node, revision number, or a string
628 628 that can be converted to an integer.
629 629
630 630 Raises ``error.LookupError`` if a node could not be resolved.
631 631 """
632 632
633 633 def linkrev(rev):
634 634 """Obtain the changeset revision number a revision is linked to."""
635 635
636 636 def iscensored(rev):
637 637 """Return whether a revision's content has been censored."""
638 638
639 639 def commonancestorsheads(node1, node2):
640 640 """Obtain an iterable of nodes containing heads of common ancestors.
641 641
642 642 See ``ancestor.commonancestorsheads()``.
643 643 """
644 644
645 645 def descendants(revs):
646 646 """Obtain descendant revision numbers for a set of revision numbers.
647 647
648 648 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
649 649 """
650 650
651 651 def heads(start=None, stop=None):
652 652 """Obtain a list of nodes that are DAG heads, with control.
653 653
654 654 The set of revisions examined can be limited by specifying
655 655 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
656 656 iterable of nodes. DAG traversal starts at earlier revision
657 657 ``start`` and iterates forward until any node in ``stop`` is
658 658 encountered.
659 659 """
660 660
661 661 def children(node):
662 662 """Obtain nodes that are children of a node.
663 663
664 664 Returns a list of nodes.
665 665 """
666 666
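# Sketch (not part of the upstream file): walking an ``ifileindex``-conforming
# store ``fl`` (e.g. a filelog) and resolving DAG data for each revision.
def _example_walk_file_index(fl):
    for rev in fl.revs():
        node = fl.node(rev)
        p1, p2 = fl.parents(node)
        linkrev = fl.linkrev(rev)
        if fl.iscensored(rev):
            pass  # content for this revision has been censored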
667 667
668 668 class ifiledata(interfaceutil.Interface):
669 669 """Storage interface for data storage of a specific file.
670 670
671 671 This complements ``ifileindex`` and provides an interface for accessing
672 672 data for a tracked file.
673 673 """
674 674
675 675 def size(rev):
676 676 """Obtain the fulltext size of file data.
677 677
678 678 Any metadata is excluded from size measurements.
679 679 """
680 680
681 681 def revision(node, raw=False):
682 682 """Obtain fulltext data for a node.
683 683
684 684 By default, any storage transformations are applied before the data
685 685 is returned. If ``raw`` is True, non-raw storage transformations
686 686 are not applied.
687 687
688 688 The fulltext data may contain a header containing metadata. Most
689 689 consumers should use ``read()`` to obtain the actual file data.
690 690 """
691 691
692 692 def rawdata(node):
693 693 """Obtain raw data for a node."""
694 694
695 695 def read(node):
696 696 """Resolve file fulltext data.
697 697
698 698 This is similar to ``revision()`` except any metadata in the data
699 699 headers is stripped.
700 700 """
701 701
702 702 def renamed(node):
703 703 """Obtain copy metadata for a node.
704 704
705 705 Returns ``False`` if no copy metadata is stored or a 2-tuple of
706 706 (path, node) from which this revision was copied.
707 707 """
708 708
709 709 def cmp(node, fulltext):
710 710 """Compare fulltext to another revision.
711 711
712 712 Returns True if the fulltext is different from what is stored.
713 713
714 714 This takes copy metadata into account.
715 715
716 716 TODO better document the copy metadata and censoring logic.
717 717 """
718 718
719 719 def emitrevisions(
720 720 nodes,
721 721 nodesorder=None,
722 722 revisiondata=False,
723 723 assumehaveparentrevisions=False,
724 724 deltamode=CG_DELTAMODE_STD,
725 725 ):
726 726 """Produce ``irevisiondelta`` for revisions.
727 727
728 728 Given an iterable of nodes, emits objects conforming to the
729 729 ``irevisiondelta`` interface that describe revisions in storage.
730 730
731 731 This method is a generator.
732 732
733 733 The input nodes may be unordered. Implementations must ensure that a
734 734 node's parents are emitted before the node itself. Transitively, this
735 735 means that a node may only be emitted once all its ancestors in
736 736 ``nodes`` have also been emitted.
737 737
738 738 By default, emits "index" data (the ``node``, ``p1node``, and
739 739 ``p2node`` attributes). If ``revisiondata`` is set, revision data
740 740 will also be present on the emitted objects.
741 741
742 742 With default argument values, implementations can choose to emit
743 743 either fulltext revision data or a delta. When emitting deltas,
744 744 implementations must consider whether the delta's base revision
745 745 fulltext is available to the receiver.
746 746
747 747 The base revision fulltext is guaranteed to be available if any of
748 748 the following are met:
749 749
750 750 * Its fulltext revision was emitted by this method call.
751 751 * A delta for that revision was emitted by this method call.
752 752 * ``assumehaveparentrevisions`` is True and the base revision is a
753 753 parent of the node.
754 754
755 755 ``nodesorder`` can be used to control the order that revisions are
756 756 emitted. By default, revisions can be reordered as long as they are
757 757 in DAG topological order (see above). If the value is ``nodes``,
758 758 the iteration order from ``nodes`` should be used. If the value is
759 759 ``storage``, then the native order from the backing storage layer
760 760 is used. (Not all storage layers will have strong ordering and behavior
761 761 of this mode is storage-dependent.) ``nodes`` ordering can force
762 762 revisions to be emitted before their ancestors, so consumers should
763 763 use it with care.
764 764
765 765 The ``linknode`` attribute on the returned ``irevisiondelta`` may not
766 766 be set and it is the caller's responsibility to resolve it, if needed.
767 767
768 768 If ``deltamode`` is CG_DELTAMODE_PREV and revision data is requested,
769 769 all revision data should be emitted as deltas against the revision
770 770 emitted just prior. The initial revision should be a delta against its
771 771 1st parent.
772 772 """
773 773
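# Sketch (not part of the upstream file) of consuming ``emitrevisions()``;
# whether a revision arrives as a delta or a fulltext is up to the backend.
def _example_emit(fl, nodes):
    for rev in fl.emitrevisions(
        nodes, revisiondata=True, assumehaveparentrevisions=True
    ):
        if rev.delta is not None:
            pass  # apply ``rev.delta`` on top of ``rev.basenode``
        else:
            pass  # ``rev.revision`` carries the fulltext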
774 774
775 775 class ifilemutation(interfaceutil.Interface):
776 776 """Storage interface for mutation events of a tracked file."""
777 777
778 778 def add(filedata, meta, transaction, linkrev, p1, p2):
779 779 """Add a new revision to the store.
780 780
781 781 Takes file data, dictionary of metadata, a transaction, linkrev,
782 782 and parent nodes.
783 783
784 784 Returns the node that was added.
785 785
786 786 May no-op if a revision matching the supplied data is already stored.
787 787 """
788 788
789 789 def addrevision(
790 790 revisiondata,
791 791 transaction,
792 792 linkrev,
793 793 p1,
794 794 p2,
795 795 node=None,
796 796 flags=0,
797 797 cachedelta=None,
798 798 ):
799 799 """Add a new revision to the store and return its number.
800 800
801 801 This is similar to ``add()`` except it operates at a lower level.
802 802
803 803 The data passed in already contains a metadata header, if any.
804 804
805 805 ``node`` and ``flags`` can be used to define the expected node and
806 806 the flags to use with storage. ``flags`` is a bitwise value composed
807 807 of the various ``REVISION_FLAG_*`` constants.
808 808
809 809 ``add()`` is usually called when adding files from e.g. the working
810 810 directory. ``addrevision()`` is often called by ``add()`` and for
811 811 scenarios where revision data has already been computed, such as when
812 812 applying raw data from a peer repo.
813 813 """
814 814
815 815 def addgroup(
816 816 deltas,
817 817 linkmapper,
818 818 transaction,
819 819 addrevisioncb=None,
820 820 duplicaterevisioncb=None,
821 821 maybemissingparents=False,
822 822 ):
823 823 """Process a series of deltas for storage.
824 824
825 825 ``deltas`` is an iterable of 7-tuples of
826 826 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
827 827 to add.
828 828
829 829 The ``delta`` field contains ``mpatch`` data to apply to a base
830 830 revision, identified by ``deltabase``. The base node can be
831 831 ``nullid``, in which case the header from the delta can be ignored
832 832 and the delta used as the fulltext.
833 833
834 834 ``alwayscache`` instructs the lower layers to cache the content of the
835 835 newly added revision, even if it needs to be explicitly computed.
836 836 This used to be the default when ``addrevisioncb`` was provided up to
837 837 Mercurial 5.8.
838 838
839 839 ``addrevisioncb`` should be called for each new rev as it is committed.
840 840 ``duplicaterevisioncb`` should be called for all revs with a
841 841 pre-existing node.
842 842
843 843 ``maybemissingparents`` is a bool indicating whether the incoming
844 844 data may reference parents/ancestor revisions that aren't present.
845 845 This flag is set when receiving data into a "shallow" store that
846 846 doesn't hold all history.
847 847
848 848 Returns a list of nodes that were processed. A node will be in the list
849 849 even if it existed in the store previously.
850 850 """
851 851
852 852 def censorrevision(tr, node, tombstone=b''):
853 853 """Remove the content of a single revision.
854 854
855 855 The specified ``node`` will have its content purged from storage.
856 856 Future attempts to access the revision data for this node will
857 857 result in failure.
858 858
859 859 A ``tombstone`` message can optionally be stored. This message may be
860 860 displayed to users when they attempt to access the missing revision
861 861 data.
862 862
863 863 Storage backends may have stored deltas against the previous content
864 864 in this revision. As part of censoring a revision, these storage
865 865 backends are expected to rewrite any internally stored deltas such
866 866 that they no longer reference the deleted content.
867 867 """
868 868
869 869 def getstrippoint(minlink):
870 870 """Find the minimum revision that must be stripped to strip a linkrev.
871 871
872 872 Returns a 2-tuple containing the minimum revision number and a set
873 873 of all revisions numbers that would be broken by this strip.
874 874
875 875 TODO this is highly revlog centric and should be abstracted into
876 876 a higher-level deletion API. ``repair.strip()`` relies on this.
877 877 """
878 878
879 879 def strip(minlink, transaction):
880 880 """Remove storage of items starting at a linkrev.
881 881
882 882 This uses ``getstrippoint()`` to determine the first node to remove.
883 883 Then it effectively truncates storage for all revisions after that.
884 884
885 885 TODO this is highly revlog centric and should be abstracted into a
886 886 higher-level deletion API.
887 887 """
888 888
889 889
890 890 class ifilestorage(ifileindex, ifiledata, ifilemutation):
891 891 """Complete storage interface for a single tracked file."""
892 892
893 893 def files():
894 894 """Obtain paths that are backing storage for this file.
895 895
896 896 TODO this is used heavily by verify code and there should probably
897 897 be a better API for that.
898 898 """
899 899
900 900 def storageinfo(
901 901 exclusivefiles=False,
902 902 sharedfiles=False,
903 903 revisionscount=False,
904 904 trackedsize=False,
905 905 storedsize=False,
906 906 ):
907 907 """Obtain information about storage for this file's data.
908 908
909 909 Returns a dict describing storage for this tracked path. The keys
910 910 in the dict map to arguments of the same. The arguments are bools
911 911 indicating whether to calculate and obtain that data.
912 912
913 913 exclusivefiles
914 914 Iterable of (vfs, path) describing files that are exclusively
915 915 used to back storage for this tracked path.
916 916
917 917 sharedfiles
918 918 Iterable of (vfs, path) describing files that are used to back
919 919 storage for this tracked path. Those files may also provide storage
920 920 for other stored entities.
921 921
922 922 revisionscount
923 923 Number of revisions available for retrieval.
924 924
925 925 trackedsize
926 926 Total size in bytes of all tracked revisions. This is a sum of the
927 927 length of the fulltext of all revisions.
928 928
929 929 storedsize
930 930 Total size in bytes used to store data for all tracked revisions.
931 931 This is commonly less than ``trackedsize`` due to internal usage
932 932 of deltas rather than fulltext revisions.
933 933
934 934         Not all storage backends may support all queries or have a reasonable
935 935 value to use. In that case, the value should be set to ``None`` and
936 936 callers are expected to handle this special value.
937 937 """
938 938
939 939 def verifyintegrity(state):
940 940 """Verifies the integrity of file storage.
941 941
942 942 ``state`` is a dict holding state of the verifier process. It can be
943 943 used to communicate data between invocations of multiple storage
944 944 primitives.
945 945
946 946 If individual revisions cannot have their revision content resolved,
947 947 the method is expected to set the ``skipread`` key to a set of nodes
948 948 that encountered problems. If set, the method can also add the node(s)
949 949 to ``safe_renamed`` in order to indicate nodes that may perform the
950 950 rename checks with currently accessible data.
951 951
952 952 The method yields objects conforming to the ``iverifyproblem``
953 953 interface.
954 954 """
955 955
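# Usage sketch (not part of the upstream file) for ``storageinfo()``: only
# the requested fields are computed, and backends may report ``None`` for
# queries they cannot answer. Bytes keys mirroring the argument names are an
# assumption (true of the revlog backend).
def _example_storage_size(fl):
    info = fl.storageinfo(revisionscount=True, trackedsize=True)
    if info[b'trackedsize'] is None:
        pass  # backend could not compute the tracked size
    return info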
956 956
957 957 class idirs(interfaceutil.Interface):
958 958 """Interface representing a collection of directories from paths.
959 959
960 960 This interface is essentially a derived data structure representing
961 961 directories from a collection of paths.
962 962 """
963 963
964 964 def addpath(path):
965 965 """Add a path to the collection.
966 966
967 967 All directories in the path will be added to the collection.
968 968 """
969 969
970 970 def delpath(path):
971 971 """Remove a path from the collection.
972 972
973 973 If the removal was the last path in a particular directory, the
974 974 directory is removed from the collection.
975 975 """
976 976
977 977 def __iter__():
978 978 """Iterate over the directories in this collection of paths."""
979 979
980 980 def __contains__(path):
981 981 """Whether a specific directory is in this collection."""
982 982
983 983
984 984 class imanifestdict(interfaceutil.Interface):
985 985 """Interface representing a manifest data structure.
986 986
987 987 A manifest is effectively a dict mapping paths to entries. Each entry
988 988 consists of a binary node and extra flags affecting that entry.
989 989 """
990 990
991 991 def __getitem__(path):
992 992 """Returns the binary node value for a path in the manifest.
993 993
994 994 Raises ``KeyError`` if the path does not exist in the manifest.
995 995
996 996 Equivalent to ``self.find(path)[0]``.
997 997 """
998 998
999 999 def find(path):
1000 1000 """Returns the entry for a path in the manifest.
1001 1001
1002 1002 Returns a 2-tuple of (node, flags).
1003 1003
1004 1004 Raises ``KeyError`` if the path does not exist in the manifest.
1005 1005 """
1006 1006
1007 1007 def __len__():
1008 1008 """Return the number of entries in the manifest."""
1009 1009
1010 1010 def __nonzero__():
1011 1011 """Returns True if the manifest has entries, False otherwise."""
1012 1012
1013 1013 __bool__ = __nonzero__
1014 1014
1015 1015 def __setitem__(path, node):
1016 1016 """Define the node value for a path in the manifest.
1017 1017
1018 1018 If the path is already in the manifest, its flags will be copied to
1019 1019 the new entry.
1020 1020 """
1021 1021
1022 1022 def __contains__(path):
1023 1023 """Whether a path exists in the manifest."""
1024 1024
1025 1025 def __delitem__(path):
1026 1026 """Remove a path from the manifest.
1027 1027
1028 1028 Raises ``KeyError`` if the path is not in the manifest.
1029 1029 """
1030 1030
1031 1031 def __iter__():
1032 1032 """Iterate over paths in the manifest."""
1033 1033
1034 1034 def iterkeys():
1035 1035 """Iterate over paths in the manifest."""
1036 1036
1037 1037 def keys():
1038 1038 """Obtain a list of paths in the manifest."""
1039 1039
1040 1040 def filesnotin(other, match=None):
1041 1041 """Obtain the set of paths in this manifest but not in another.
1042 1042
1043 1043 ``match`` is an optional matcher function to be applied to both
1044 1044 manifests.
1045 1045
1046 1046 Returns a set of paths.
1047 1047 """
1048 1048
1049 1049 def dirs():
1050 1050 """Returns an object implementing the ``idirs`` interface."""
1051 1051
1052 1052 def hasdir(dir):
1053 1053 """Returns a bool indicating if a directory is in this manifest."""
1054 1054
1055 1055 def walk(match):
1056 1056 """Generator of paths in manifest satisfying a matcher.
1057 1057
1058 1058 If the matcher has explicit files listed and they don't exist in
1059 1059 the manifest, ``match.bad()`` is called for each missing file.
1060 1060 """
1061 1061
1062 1062 def diff(other, match=None, clean=False):
1063 1063 """Find differences between this manifest and another.
1064 1064
1065 1065 This manifest is compared to ``other``.
1066 1066
1067 1067 If ``match`` is provided, the two manifests are filtered against this
1068 1068 matcher and only entries satisfying the matcher are compared.
1069 1069
1070 1070 If ``clean`` is True, unchanged files are included in the returned
1071 1071 object.
1072 1072
1073 1073 Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
1074 1074 the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
1075 1075 represents the node and flags for this manifest and ``(node2, flag2)``
1076 1076 are the same for the other manifest.
1077 1077 """
1078 1078
1079 1079 def setflag(path, flag):
1080 1080 """Set the flag value for a given path.
1081 1081
1082 1082 Raises ``KeyError`` if the path is not already in the manifest.
1083 1083 """
1084 1084
1085 1085 def get(path, default=None):
1086 1086 """Obtain the node value for a path or a default value if missing."""
1087 1087
1088 1088 def flags(path):
1089 1089 """Return the flags value for a path (default: empty bytestring)."""
1090 1090
1091 1091 def copy():
1092 1092 """Return a copy of this manifest."""
1093 1093
1094 1094 def items():
1095 1095 """Returns an iterable of (path, node) for items in this manifest."""
1096 1096
1097 1097 def iteritems():
1098 1098 """Identical to items()."""
1099 1099
1100 1100 def iterentries():
1101 1101 """Returns an iterable of (path, node, flags) for this manifest.
1102 1102
1103 1103 Similar to ``iteritems()`` except items are a 3-tuple and include
1104 1104 flags.
1105 1105 """
1106 1106
1107 1107 def text():
1108 1108 """Obtain the raw data representation for this manifest.
1109 1109
1110 1110 Result is used to create a manifest revision.
1111 1111 """
1112 1112
1113 1113 def fastdelta(base, changes):
1114 1114 """Obtain a delta between this manifest and another given changes.
1115 1115
1116 1116         ``base`` is the raw data representation for another manifest.
1117 1117
1118 1118 ``changes`` is an iterable of ``(path, to_delete)``.
1119 1119
1120 1120 Returns a 2-tuple containing ``bytearray(self.text())`` and the
1121 1121 delta between ``base`` and this manifest.
1122 1122
1123 1123 If this manifest implementation can't support ``fastdelta()``,
1124 1124 raise ``mercurial.manifest.FastdeltaUnavailable``.
1125 1125 """
1126 1126
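# Sketch (not part of the upstream file) of interpreting
# ``imanifestdict.diff()``: a path missing on one side is reported with
# ``None`` as the node for that side.
def _example_manifest_diff(m1, m2):
    for path, ((node1, flag1), (node2, flag2)) in m1.diff(m2).items():
        if node1 is None:
            pass  # path only exists in m2
        elif node2 is None:
            pass  # path only exists in m1
        else:
            pass  # node and/or flags differ between m1 and m2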
1127 1127
1128 1128 class imanifestrevisionbase(interfaceutil.Interface):
1129 1129 """Base interface representing a single revision of a manifest.
1130 1130
1131 1131 Should not be used as a primary interface: should always be inherited
1132 1132 as part of a larger interface.
1133 1133 """
1134 1134
1135 1135 def copy():
1136 1136 """Obtain a copy of this manifest instance.
1137 1137
1138 1138 Returns an object conforming to the ``imanifestrevisionwritable``
1139 1139 interface. The instance will be associated with the same
1140 1140 ``imanifestlog`` collection as this instance.
1141 1141 """
1142 1142
1143 1143 def read():
1144 1144 """Obtain the parsed manifest data structure.
1145 1145
1146 1146 The returned object conforms to the ``imanifestdict`` interface.
1147 1147 """
1148 1148
1149 1149
1150 1150 class imanifestrevisionstored(imanifestrevisionbase):
1151 1151 """Interface representing a manifest revision committed to storage."""
1152 1152
1153 1153 def node():
1154 1154 """The binary node for this manifest."""
1155 1155
1156 1156 parents = interfaceutil.Attribute(
1157 1157 """List of binary nodes that are parents for this manifest revision."""
1158 1158 )
1159 1159
1160 1160 def readdelta(shallow=False):
1161 1161 """Obtain the manifest data structure representing changes from parent.
1162 1162
1163 1163 This manifest is compared to its 1st parent. A new manifest representing
1164 1164 those differences is constructed.
1165 1165
1166 1166 The returned object conforms to the ``imanifestdict`` interface.
1167 1167 """
1168 1168
1169 1169 def readfast(shallow=False):
1170 1170 """Calls either ``read()`` or ``readdelta()``.
1171 1171
1172 1172 The faster of the two options is called.
1173 1173 """
1174 1174
1175 1175 def find(key):
1176 1176         """Calls ``self.read().find(key)``.
1177 1177
1178 1178 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
1179 1179 """
1180 1180
1181 1181
1182 1182 class imanifestrevisionwritable(imanifestrevisionbase):
1183 1183 """Interface representing a manifest revision that can be committed."""
1184 1184
1185 1185 def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
1186 1186 """Add this revision to storage.
1187 1187
1188 1188 Takes a transaction object, the changeset revision number it will
1189 1189 be associated with, its parent nodes, and lists of added and
1190 1190 removed paths.
1191 1191
1192 1192 If match is provided, storage can choose not to inspect or write out
1193 1193 items that do not match. Storage is still required to be able to provide
1194 1194 the full manifest in the future for any directories written (these
1195 1195 manifests should not be "narrowed on disk").
1196 1196
1197 1197 Returns the binary node of the created revision.
1198 1198 """
1199 1199
1200 1200
1201 1201 class imanifeststorage(interfaceutil.Interface):
1202 1202 """Storage interface for manifest data."""
1203 1203
1204 1204 nodeconstants = interfaceutil.Attribute(
1205 1205 """nodeconstants used by the current repository."""
1206 1206 )
1207 1207
1208 1208 tree = interfaceutil.Attribute(
1209 1209 """The path to the directory this manifest tracks.
1210 1210
1211 1211 The empty bytestring represents the root manifest.
1212 1212 """
1213 1213 )
1214 1214
1215 1215 index = interfaceutil.Attribute(
1216 1216 """An ``ifilerevisionssequence`` instance."""
1217 1217 )
1218 1218
1219 1219 opener = interfaceutil.Attribute(
1220 1220 """VFS opener to use to access underlying files used for storage.
1221 1221
1222 1222 TODO this is revlog specific and should not be exposed.
1223 1223 """
1224 1224 )
1225 1225
1226 1226 _generaldelta = interfaceutil.Attribute(
1227 1227 """Whether generaldelta storage is being used.
1228 1228
1229 1229 TODO this is revlog specific and should not be exposed.
1230 1230 """
1231 1231 )
1232 1232
1233 1233 fulltextcache = interfaceutil.Attribute(
1234 1234 """Dict with cache of fulltexts.
1235 1235
1236 1236 TODO this doesn't feel appropriate for the storage interface.
1237 1237 """
1238 1238 )
1239 1239
1240 1240 def __len__():
1241 1241 """Obtain the number of revisions stored for this manifest."""
1242 1242
1243 1243 def __iter__():
1244 1244 """Iterate over revision numbers for this manifest."""
1245 1245
1246 1246 def rev(node):
1247 1247 """Obtain the revision number given a binary node.
1248 1248
1249 1249 Raises ``error.LookupError`` if the node is not known.
1250 1250 """
1251 1251
1252 1252 def node(rev):
1253 1253 """Obtain the node value given a revision number.
1254 1254
1255 1255 Raises ``error.LookupError`` if the revision is not known.
1256 1256 """
1257 1257
1258 1258 def lookup(value):
1259 1259 """Attempt to resolve a value to a node.
1260 1260
1261 1261 Value can be a binary node, hex node, revision number, or a bytes
1262 1262 that can be converted to an integer.
1263 1263
1264 1264         Raises ``error.LookupError`` if a node could not be resolved.
1265 1265 """
1266 1266
1267 1267 def parents(node):
1268 1268 """Returns a 2-tuple of parent nodes for a node.
1269 1269
1270 1270 Values will be ``nullid`` if the parent is empty.
1271 1271 """
1272 1272
1273 1273 def parentrevs(rev):
1274 1274 """Like parents() but operates on revision numbers."""
1275 1275
1276 1276 def linkrev(rev):
1277 1277 """Obtain the changeset revision number a revision is linked to."""
1278 1278
1279 1279 def revision(node, _df=None):
1280 1280 """Obtain fulltext data for a node."""
1281 1281
1282 1282 def rawdata(node, _df=None):
1283 1283 """Obtain raw data for a node."""
1284 1284
1285 1285 def revdiff(rev1, rev2):
1286 1286 """Obtain a delta between two revision numbers.
1287 1287
1288 1288 The returned data is the result of ``bdiff.bdiff()`` on the raw
1289 1289 revision data.
1290 1290 """
1291 1291
1292 1292 def cmp(node, fulltext):
1293 1293 """Compare fulltext to another revision.
1294 1294
1295 1295 Returns True if the fulltext is different from what is stored.
1296 1296 """
1297 1297
1298 1298 def emitrevisions(
1299 1299 nodes,
1300 1300 nodesorder=None,
1301 1301 revisiondata=False,
1302 1302 assumehaveparentrevisions=False,
1303 1303 ):
1304 1304 """Produce ``irevisiondelta`` describing revisions.
1305 1305
1306 1306 See the documentation for ``ifiledata`` for more.
1307 1307 """
1308 1308
1309 1309 def addgroup(
1310 1310 deltas,
1311 1311 linkmapper,
1312 1312 transaction,
1313 1313 addrevisioncb=None,
1314 1314 duplicaterevisioncb=None,
1315 1315 ):
1316 1316 """Process a series of deltas for storage.
1317 1317
1318 1318 See the documentation in ``ifilemutation`` for more.
1319 1319 """
1320 1320
1321 1321 def rawsize(rev):
1322 1322 """Obtain the size of tracked data.
1323 1323
1324 1324 Is equivalent to ``len(m.rawdata(node))``.
1325 1325
1326 1326 TODO this method is only used by upgrade code and may be removed.
1327 1327 """
1328 1328
1329 1329 def getstrippoint(minlink):
1330 1330 """Find minimum revision that must be stripped to strip a linkrev.
1331 1331
1332 1332 See the documentation in ``ifilemutation`` for more.
1333 1333 """
1334 1334
1335 1335 def strip(minlink, transaction):
1336 1336 """Remove storage of items starting at a linkrev.
1337 1337
1338 1338 See the documentation in ``ifilemutation`` for more.
1339 1339 """
1340 1340
1341 1341 def checksize():
1342 1342 """Obtain the expected sizes of backing files.
1343 1343
1344 1344 TODO this is used by verify and it should not be part of the interface.
1345 1345 """
1346 1346
1347 1347 def files():
1348 1348 """Obtain paths that are backing storage for this manifest.
1349 1349
1350 1350 TODO this is used by verify and there should probably be a better API
1351 1351 for this functionality.
1352 1352 """
1353 1353
1354 1354 def deltaparent(rev):
1355 1355 """Obtain the revision that a revision is delta'd against.
1356 1356
1357 1357 TODO delta encoding is an implementation detail of storage and should
1358 1358 not be exposed to the storage interface.
1359 1359 """
1360 1360
1361 1361 def clone(tr, dest, **kwargs):
1362 1362 """Clone this instance to another."""
1363 1363
1364 1364 def clearcaches(clear_persisted_data=False):
1365 1365 """Clear any caches associated with this instance."""
1366 1366
1367 1367 def dirlog(d):
1368 1368 """Obtain a manifest storage instance for a tree."""
1369 1369
1370 1370 def add(
1371 1371 m, transaction, link, p1, p2, added, removed, readtree=None, match=None
1372 1372 ):
1373 1373 """Add a revision to storage.
1374 1374
1375 1375 ``m`` is an object conforming to ``imanifestdict``.
1376 1376
1377 1377 ``link`` is the linkrev revision number.
1378 1378
1379 1379 ``p1`` and ``p2`` are the parent revision numbers.
1380 1380
1381 1381 ``added`` and ``removed`` are iterables of added and removed paths,
1382 1382 respectively.
1383 1383
1384 1384 ``readtree`` is a function that can be used to read the child tree(s)
1385 1385 when recursively writing the full tree structure when using
1386 1386         treemanifests.
1387 1387
1388 1388 ``match`` is a matcher that can be used to hint to storage that not all
1389 1389 paths must be inspected; this is an optimization and can be safely
1390 1390 ignored. Note that the storage must still be able to reproduce a full
1391 1391 manifest including files that did not match.
1392 1392 """
1393 1393
1394 1394 def storageinfo(
1395 1395 exclusivefiles=False,
1396 1396 sharedfiles=False,
1397 1397 revisionscount=False,
1398 1398 trackedsize=False,
1399 1399 storedsize=False,
1400 1400 ):
1401 1401 """Obtain information about storage for this manifest's data.
1402 1402
1403 1403 See ``ifilestorage.storageinfo()`` for a description of this method.
1404 1404 This one behaves the same way, except for manifest data.
1405 1405 """
1406 1406
1407 1407
1408 1408 class imanifestlog(interfaceutil.Interface):
1409 1409 """Interface representing a collection of manifest snapshots.
1410 1410
1411 1411 Represents the root manifest in a repository.
1412 1412
1413 1413 Also serves as a means to access nested tree manifests and to cache
1414 1414 tree manifests.
1415 1415 """
1416 1416
1417 1417 nodeconstants = interfaceutil.Attribute(
1418 1418 """nodeconstants used by the current repository."""
1419 1419 )
1420 1420
1421 1421 def __getitem__(node):
1422 1422 """Obtain a manifest instance for a given binary node.
1423 1423
1424 1424 Equivalent to calling ``self.get('', node)``.
1425 1425
1426 1426 The returned object conforms to the ``imanifestrevisionstored``
1427 1427 interface.
1428 1428 """
1429 1429
1430 1430 def get(tree, node, verify=True):
1431 1431 """Retrieve the manifest instance for a given directory and binary node.
1432 1432
1433 1433 ``node`` always refers to the node of the root manifest (which will be
1434 1434 the only manifest if flat manifests are being used).
1435 1435
1436 1436 If ``tree`` is the empty string, the root manifest is returned.
1437 1437 Otherwise the manifest for the specified directory will be returned
1438 1438 (requires tree manifests).
1439 1439
1440 1440 If ``verify`` is True, ``LookupError`` is raised if the node is not
1441 1441 known.
1442 1442
1443 1443 The returned object conforms to the ``imanifestrevisionstored``
1444 1444 interface.
1445 1445 """
1446 1446
1447 1447 def getstorage(tree):
1448 1448 """Retrieve an interface to storage for a particular tree.
1449 1449
1450 1450 If ``tree`` is the empty bytestring, storage for the root manifest will
1451 1451 be returned. Otherwise storage for a tree manifest is returned.
1452 1452
1453 1453 TODO formalize interface for returned object.
1454 1454 """
1455 1455
1456 1456 def clearcaches():
1457 1457 """Clear caches associated with this collection."""
1458 1458
1459 1459 def rev(node):
1460 1460 """Obtain the revision number for a binary node.
1461 1461
1462 1462 Raises ``error.LookupError`` if the node is not known.
1463 1463 """
1464 1464
1465 1465 def update_caches(transaction):
1466 1466         """update whatever caches are relevant for the used storage."""
1467 1467
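# Usage sketch (not part of the upstream file); ``mlog`` is an
# ``imanifestlog`` instance and ``mnode`` a root manifest node, both names
# illustrative.
def _example_read_manifest(mlog, mnode):
    mctx = mlog[mnode]            # imanifestrevisionstored for the root tree
    mdict = mctx.read()           # imanifestdict mapping path -> node
    store = mlog.getstorage(b'')  # storage backing the root manifest
    return mdict, store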
1468 1468
1469 1469 class ilocalrepositoryfilestorage(interfaceutil.Interface):
1470 1470 """Local repository sub-interface providing access to tracked file storage.
1471 1471
1472 1472 This interface defines how a repository accesses storage for a single
1473 1473 tracked file path.
1474 1474 """
1475 1475
1476 1476 def file(f):
1477 1477 """Obtain a filelog for a tracked path.
1478 1478
1479 1479 The returned type conforms to the ``ifilestorage`` interface.
1480 1480 """
1481 1481
1482 1482
1483 1483 class ilocalrepositorymain(interfaceutil.Interface):
1484 1484 """Main interface for local repositories.
1485 1485
1486 1486 This currently captures the reality of things - not how things should be.
1487 1487 """
1488 1488
1489 1489 nodeconstants = interfaceutil.Attribute(
1490 1490 """Constant nodes matching the hash function used by the repository."""
1491 1491 )
1492 1492 nullid = interfaceutil.Attribute(
1493 1493 """null revision for the hash function used by the repository."""
1494 1494 )
1495 1495
1496 1496 supported = interfaceutil.Attribute(
1497 1497 """Set of requirements that this repo is capable of opening."""
1498 1498 )
1499 1499
1500 1500 requirements = interfaceutil.Attribute(
1501 1501 """Set of requirements this repo uses."""
1502 1502 )
1503 1503
1504 1504 features = interfaceutil.Attribute(
1505 1505 """Set of "features" this repository supports.
1506 1506
1507 1507 A "feature" is a loosely-defined term. It can refer to a feature
1508 1508 in the classical sense or can describe an implementation detail
1509 1509 of the repository. For example, a ``readonly`` feature may denote
1510 1510 the repository as read-only. Or a ``revlogfilestore`` feature may
1511 1511 denote that the repository is using revlogs for file storage.
1512 1512
1513 1513 The intent of features is to provide a machine-queryable mechanism
1514 1514 for repo consumers to test for various repository characteristics.
1515 1515
1516 1516 Features are similar to ``requirements``. The main difference is that
1517 1517 requirements are stored on-disk and represent requirements to open the
1518 1518 repository. Features are more run-time capabilities of the repository
1519 1519 and more granular capabilities (which may be derived from requirements).
1520 1520 """
1521 1521 )
1522 1522
1523 1523 filtername = interfaceutil.Attribute(
1524 1524 """Name of the repoview that is active on this repo."""
1525 1525 )
1526 1526
1527 vfs_map = interfaceutil.Attribute(
1528         """a bytes-key → vfs mapping used by transaction and others"""
1529 )
1530
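# The new ``vfs_map`` attribute is the point of this changeset: it gives
# transaction and undo-file handling one shared prefix -> vfs lookup. A
# minimal sketch of what a concrete repository could return, with keys that
# are illustrative assumptions rather than copied from the changeset:
#
#     @property
#     def vfs_map(self):
#         return {
#             b'': self.svfs,      # store-relative entries (default)
#             b'plain': self.vfs,  # .hg/-relative entries
#         }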
1527 1531 wvfs = interfaceutil.Attribute(
1528 1532 """VFS used to access the working directory."""
1529 1533 )
1530 1534
1531 1535 vfs = interfaceutil.Attribute(
1532 1536 """VFS rooted at the .hg directory.
1533 1537
1534 1538 Used to access repository data not in the store.
1535 1539 """
1536 1540 )
1537 1541
1538 1542 svfs = interfaceutil.Attribute(
1539 1543 """VFS rooted at the store.
1540 1544
1541 1545 Used to access repository data in the store. Typically .hg/store.
1542 1546 But can point elsewhere if the store is shared.
1543 1547 """
1544 1548 )
1545 1549
1546 1550 root = interfaceutil.Attribute(
1547 1551 """Path to the root of the working directory."""
1548 1552 )
1549 1553
1550 1554 path = interfaceutil.Attribute("""Path to the .hg directory.""")
1551 1555
1552 1556 origroot = interfaceutil.Attribute(
1553 1557 """The filesystem path that was used to construct the repo."""
1554 1558 )
1555 1559
1556 1560 auditor = interfaceutil.Attribute(
1557 1561 """A pathauditor for the working directory.
1558 1562
1559 1563 This checks if a path refers to a nested repository.
1560 1564
1561 1565 Operates on the filesystem.
1562 1566 """
1563 1567 )
1564 1568
1565 1569 nofsauditor = interfaceutil.Attribute(
1566 1570 """A pathauditor for the working directory.
1567 1571
1568 1572 This is like ``auditor`` except it doesn't do filesystem checks.
1569 1573 """
1570 1574 )
1571 1575
1572 1576 baseui = interfaceutil.Attribute(
1573 1577 """Original ui instance passed into constructor."""
1574 1578 )
1575 1579
1576 1580 ui = interfaceutil.Attribute("""Main ui instance for this repository.""")
1577 1581
1578 1582 sharedpath = interfaceutil.Attribute(
1579 1583 """Path to the .hg directory of the repo this repo was shared from."""
1580 1584 )
1581 1585
1582 1586 store = interfaceutil.Attribute("""A store instance.""")
1583 1587
1584 1588 spath = interfaceutil.Attribute("""Path to the store.""")
1585 1589
1586 1590 sjoin = interfaceutil.Attribute("""Alias to self.store.join.""")
1587 1591
1588 1592 cachevfs = interfaceutil.Attribute(
1589 1593 """A VFS used to access the cache directory.
1590 1594
1591 1595 Typically .hg/cache.
1592 1596 """
1593 1597 )
1594 1598
1595 1599 wcachevfs = interfaceutil.Attribute(
1596 1600 """A VFS used to access the cache directory dedicated to working copy
1597 1601
1598 1602 Typically .hg/wcache.
1599 1603 """
1600 1604 )
1601 1605
1602 1606 filteredrevcache = interfaceutil.Attribute(
1603 1607 """Holds sets of revisions to be filtered."""
1604 1608 )
1605 1609
1606 1610 names = interfaceutil.Attribute("""A ``namespaces`` instance.""")
1607 1611
1608 1612 filecopiesmode = interfaceutil.Attribute(
1609 1613 """The way files copies should be dealt with in this repo."""
1610 1614 )
1611 1615
1612 1616 def close():
1613 1617 """Close the handle on this repository."""
1614 1618
1615 1619 def peer(path=None):
1616 1620 """Obtain an object conforming to the ``peer`` interface."""
1617 1621
1618 1622 def unfiltered():
1619 1623 """Obtain an unfiltered/raw view of this repo."""
1620 1624
1621 1625 def filtered(name, visibilityexceptions=None):
1622 1626 """Obtain a named view of this repository."""
1623 1627
1624 1628 obsstore = interfaceutil.Attribute("""A store of obsolescence data.""")
1625 1629
1626 1630 changelog = interfaceutil.Attribute("""A handle on the changelog revlog.""")
1627 1631
1628 1632 manifestlog = interfaceutil.Attribute(
1629 1633 """An instance conforming to the ``imanifestlog`` interface.
1630 1634
1631 1635 Provides access to manifests for the repository.
1632 1636 """
1633 1637 )
1634 1638
1635 1639 dirstate = interfaceutil.Attribute("""Working directory state.""")
1636 1640
1637 1641 narrowpats = interfaceutil.Attribute(
1638 1642 """Matcher patterns for this repository's narrowspec."""
1639 1643 )
1640 1644
1641 1645 def narrowmatch(match=None, includeexact=False):
1642 1646 """Obtain a matcher for the narrowspec."""
1643 1647
1644 1648 def setnarrowpats(newincludes, newexcludes):
1645 1649 """Define the narrowspec for this repository."""
1646 1650
1647 1651 def __getitem__(changeid):
1648 1652 """Try to resolve a changectx."""
1649 1653
1650 1654 def __contains__(changeid):
1651 1655 """Whether a changeset exists."""
1652 1656
1653 1657 def __nonzero__():
1654 1658 """Always returns True."""
1655 1659 return True
1656 1660
1657 1661 __bool__ = __nonzero__
1658 1662
1659 1663 def __len__():
1660 1664 """Returns the number of changesets in the repo."""
1661 1665
1662 1666 def __iter__():
1663 1667 """Iterate over revisions in the changelog."""
1664 1668
1665 1669 def revs(expr, *args):
1666 1670 """Evaluate a revset.
1667 1671
1668 1672 Emits revisions.
1669 1673 """
1670 1674
1671 1675 def set(expr, *args):
1672 1676 """Evaluate a revset.
1673 1677
1674 1678 Emits changectx instances.
1675 1679 """
1676 1680
1677 1681 def anyrevs(specs, user=False, localalias=None):
1678 1682 """Find revisions matching one of the given revsets."""
1679 1683
1680 1684 def url():
1681 1685 """Returns a string representing the location of this repo."""
1682 1686
1683 1687 def hook(name, throw=False, **args):
1684 1688 """Call a hook."""
1685 1689
1686 1690 def tags():
1687 1691 """Return a mapping of tag to node."""
1688 1692
1689 1693 def tagtype(tagname):
1690 1694 """Return the type of a given tag."""
1691 1695
1692 1696 def tagslist():
1693 1697 """Return a list of tags ordered by revision."""
1694 1698
1695 1699 def nodetags(node):
1696 1700 """Return the tags associated with a node."""
1697 1701
1698 1702 def nodebookmarks(node):
1699 1703 """Return the list of bookmarks pointing to the specified node."""
1700 1704
1701 1705 def branchmap():
1702 1706 """Return a mapping of branch to heads in that branch."""
1703 1707
1704 1708 def revbranchcache():
1705 1709 pass
1706 1710
1707 1711 def register_changeset(rev, changelogrevision):
1708 1712 """Extension point for caches for new nodes.
1709 1713
1710 1714 Multiple consumers are expected to need parts of the changelogrevision,
1711 1715 so it is provided as an optimization to avoid duplicate lookups. A simple
1712 1716 cache would be fragile when other revisions are accessed, too."""
1713 1717 pass
1714 1718
1715 1719 def branchtip(branchtip, ignoremissing=False):
1716 1720 """Return the tip node for a given branch."""
1717 1721
1718 1722 def lookup(key):
1719 1723 """Resolve the node for a revision."""
1720 1724
1721 1725 def lookupbranch(key):
1722 1726 """Look up the branch name of the given revision or branch name."""
1723 1727
1724 1728 def known(nodes):
1725 1729 """Determine whether a series of nodes is known.
1726 1730
1727 1731 Returns a list of bools.
1728 1732 """
1729 1733
1730 1734 def local():
1731 1735 """Whether the repository is local."""
1732 1736 return True
1733 1737
1734 1738 def publishing():
1735 1739 """Whether the repository is a publishing repository."""
1736 1740
1737 1741 def cancopy():
1738 1742 pass
1739 1743
1740 1744 def shared():
1741 1745 """The type of shared repository or None."""
1742 1746
1743 1747 def wjoin(f, *insidef):
1744 1748 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1745 1749
1746 1750 def setparents(p1, p2):
1747 1751 """Set the parent nodes of the working directory."""
1748 1752
1749 1753 def filectx(path, changeid=None, fileid=None):
1750 1754 """Obtain a filectx for the given file revision."""
1751 1755
1752 1756 def getcwd():
1753 1757 """Obtain the current working directory from the dirstate."""
1754 1758
1755 1759 def pathto(f, cwd=None):
1756 1760 """Obtain the relative path to a file."""
1757 1761
1758 1762 def adddatafilter(name, fltr):
1759 1763 pass
1760 1764
1761 1765 def wread(filename):
1762 1766 """Read a file from wvfs, using data filters."""
1763 1767
1764 1768 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1765 1769 """Write data to a file in the wvfs, using data filters."""
1766 1770
1767 1771 def wwritedata(filename, data):
1768 1772 """Resolve data for writing to the wvfs, using data filters."""
1769 1773
1770 1774 def currenttransaction():
1771 1775 """Obtain the current transaction instance or None."""
1772 1776
1773 1777 def transaction(desc, report=None):
1774 1778 """Open a new transaction to write to the repository."""
1775 1779
1776 1780 def undofiles():
1777 1781 """Returns a list of (vfs, path) for files to undo transactions."""
1778 1782
1779 1783 def recover():
1780 1784 """Roll back an interrupted transaction."""
1781 1785
1782 1786 def rollback(dryrun=False, force=False):
1783 1787 """Undo the last transaction.
1784 1788
1785 1789 DANGEROUS.
1786 1790 """
1787 1791
1788 1792 def updatecaches(tr=None, full=False, caches=None):
1789 1793 """Warm repo caches."""
1790 1794
1791 1795 def invalidatecaches():
1792 1796 """Invalidate cached data due to the repository mutating."""
1793 1797
1794 1798 def invalidatevolatilesets():
1795 1799 pass
1796 1800
1797 1801 def invalidatedirstate():
1798 1802 """Invalidate the dirstate."""
1799 1803
1800 1804 def invalidate(clearfilecache=False):
1801 1805 pass
1802 1806
1803 1807 def invalidateall():
1804 1808 pass
1805 1809
1806 1810 def lock(wait=True):
1807 1811 """Lock the repository store and return a lock instance."""
1808 1812
1809 1813 def wlock(wait=True):
1810 1814 """Lock the non-store parts of the repository."""
1811 1815
1812 1816 def currentwlock():
1813 1817 """Return the wlock if it's held or None."""
1814 1818
1815 1819 def checkcommitpatterns(wctx, match, status, fail):
1816 1820 pass
1817 1821
1818 1822 def commit(
1819 1823 text=b'',
1820 1824 user=None,
1821 1825 date=None,
1822 1826 match=None,
1823 1827 force=False,
1824 1828 editor=False,
1825 1829 extra=None,
1826 1830 ):
1827 1831 """Add a new revision to the repository."""
1828 1832
1829 1833 def commitctx(ctx, error=False, origctx=None):
1830 1834 """Commit a commitctx instance to the repository."""
1831 1835
1832 1836 def destroying():
1833 1837 """Inform the repository that nodes are about to be destroyed."""
1834 1838
1835 1839 def destroyed():
1836 1840 """Inform the repository that nodes have been destroyed."""
1837 1841
1838 1842 def status(
1839 1843 node1=b'.',
1840 1844 node2=None,
1841 1845 match=None,
1842 1846 ignored=False,
1843 1847 clean=False,
1844 1848 unknown=False,
1845 1849 listsubrepos=False,
1846 1850 ):
1847 1851 """Convenience method to call repo[x].status()."""
1848 1852
1849 1853 def addpostdsstatus(ps):
1850 1854 pass
1851 1855
1852 1856 def postdsstatus():
1853 1857 pass
1854 1858
1855 1859 def clearpostdsstatus():
1856 1860 pass
1857 1861
1858 1862 def heads(start=None):
1859 1863 """Obtain list of nodes that are DAG heads."""
1860 1864
1861 1865 def branchheads(branch=None, start=None, closed=False):
1862 1866 pass
1863 1867
1864 1868 def branches(nodes):
1865 1869 pass
1866 1870
1867 1871 def between(pairs):
1868 1872 pass
1869 1873
1870 1874 def checkpush(pushop):
1871 1875 pass
1872 1876
1873 1877 prepushoutgoinghooks = interfaceutil.Attribute("""util.hooks instance.""")
1874 1878
1875 1879 def pushkey(namespace, key, old, new):
1876 1880 pass
1877 1881
1878 1882 def listkeys(namespace):
1879 1883 pass
1880 1884
1881 1885 def debugwireargs(one, two, three=None, four=None, five=None):
1882 1886 pass
1883 1887
1884 1888 def savecommitmessage(text):
1885 1889 pass
1886 1890
1887 1891 def register_sidedata_computer(
1888 1892 kind, category, keys, computer, flags, replace=False
1889 1893 ):
1890 1894 pass
1891 1895
1892 1896 def register_wanted_sidedata(category):
1893 1897 pass
1894 1898
1895 1899
1896 1900 class completelocalrepository(
1897 1901 ilocalrepositorymain, ilocalrepositoryfilestorage
1898 1902 ):
1899 1903 """Complete interface for a local repository."""
1900 1904
1901 1905
1902 1906 class iwireprotocolcommandcacher(interfaceutil.Interface):
1903 1907 """Represents a caching backend for wire protocol commands.
1904 1908
1905 1909 Wire protocol version 2 supports transparent caching of many commands.
1906 1910 To leverage this caching, servers can activate objects that cache
1907 1911 command responses. Objects handle both cache writing and reading.
1908 1912 This interface defines how that response caching mechanism works.
1909 1913
1910 1914 Wire protocol version 2 commands emit a series of objects that are
1911 1915 serialized and sent to the client. The caching layer exists between
1912 1916 the invocation of the command function and the sending of its output
1913 1917 objects to an output layer.
1914 1918
1915 1919 Instances of this interface represent a binding to a cache that
1916 1920 can serve a response (in place of calling a command function) and/or
1917 1921 write responses to a cache for subsequent use.
1918 1922
1919 1923 When a command request arrives, the following happens with regards
1920 1924 to this interface:
1921 1925
1922 1926 1. The server determines whether the command request is cacheable.
1923 1927 2. If it is, an instance of this interface is spawned.
1924 1928 3. The cacher is activated in a context manager (``__enter__`` is called).
1925 1929 4. A cache *key* for that request is derived. This will call the
1926 1930 instance's ``adjustcachekeystate()`` method so the derivation
1927 1931 can be influenced.
1928 1932 5. The cacher is informed of the derived cache key via a call to
1929 1933 ``setcachekey()``.
1930 1934 6. The cacher's ``lookup()`` method is called to test for presence of
1931 1935 the derived key in the cache.
1932 1936 7. If ``lookup()`` returns a hit, that cached result is used in place
1933 1937 of invoking the command function. ``__exit__`` is called and the instance
1934 1938 is discarded.
1935 1939 8. The command function is invoked.
1936 1940 9. ``onobject()`` is called for each object emitted by the command
1937 1941 function.
1938 1942 10. After the final object is seen, ``onfinished()`` is called.
1939 1943 11. ``__exit__`` is called to signal the end of use of the instance.
1940 1944
1941 1945 Cache *key* derivation can be influenced by the instance.
1942 1946
1943 1947 Cache keys are initially derived by a deterministic representation of
1944 1948 the command request. This includes the command name, arguments, protocol
1945 1949 version, etc. This initial key derivation is performed by CBOR-encoding a
1946 1950 data structure and feeding that output into a hasher.
1947 1951
1948 1952 Instances of this interface can influence this initial key derivation
1949 1953 via ``adjustcachekeystate()``.
1950 1954
1951 1955 The instance is informed of the derived cache key via a call to
1952 1956 ``setcachekey()``. The instance must store the key locally so it can
1953 1957 be consulted on subsequent operations that may require it.
1954 1958
1955 1959 When constructed, the instance has access to a callable that can be used
1956 1960 for encoding response objects. This callable receives as its single
1957 1961 argument an object emitted by a command function. It returns an iterable
1958 1962 of bytes chunks representing the encoded object. Unless the cacher is
1959 1963 caching native Python objects in memory or has a way of reconstructing
1960 1964 the original Python objects, implementations typically call this function
1961 1965 to produce bytes from the output objects and then store those bytes in
1962 1966 the cache. When it comes time to re-emit those bytes, they are wrapped
1963 1967 in a ``wireprototypes.encodedresponse`` instance to tell the output
1964 1968 layer that they are pre-encoded.
1965 1969
1966 1970 When receiving the objects emitted by the command function, instances
1967 1971 can choose what to do with those objects. The simplest thing to do is
1968 1972 re-emit the original objects. They will be forwarded to the output
1969 1973 layer and will be processed as if the cacher did not exist.
1970 1974
1971 1975 Implementations could also choose to not emit objects - instead locally
1972 1976 buffering objects or their encoded representation. They could then emit
1973 1977 a single "coalesced" object when ``onfinished()`` is called. In
1974 1978 this way, the implementation would function as a filtering layer of
1975 1979 sorts.
1976 1980
1977 1981 When caching objects, typically the encoded form of the object will
1978 1982 be stored. Keep in mind that if the original object is forwarded to
1979 1983 the output layer, it will need to be encoded there as well. For large
1980 1984 output, this redundant encoding could add overhead. Implementations
1981 1985 could wrap the encoded object data in ``wireprototypes.encodedresponse``
1982 1986 instances to avoid this overhead.
1983 1987 """
1984 1988
1985 1989 def __enter__():
1986 1990 """Marks the instance as active.
1987 1991
1988 1992 Should return self.
1989 1993 """
1990 1994
1991 1995 def __exit__(exctype, excvalue, exctb):
1992 1996 """Called when cacher is no longer used.
1993 1997
1994 1998 This can be used by implementations to perform cleanup actions (e.g.
1995 1999 disconnecting network sockets, aborting a partially cached response).
1996 2000 """
1997 2001
1998 2002 def adjustcachekeystate(state):
1999 2003 """Influences cache key derivation by adjusting state to derive key.
2000 2004
2001 2005 A dict defining the state used to derive the cache key is passed.
2002 2006
2003 2007 Implementations can modify this dict to record additional state that
2004 2008 is wanted to influence key derivation.
2005 2009
2006 2010 Implementations are *highly* encouraged to not modify or delete
2007 2011 existing keys.
2008 2012 """
2009 2013
2010 2014 def setcachekey(key):
2011 2015 """Record the derived cache key for this request.
2012 2016
2013 2017 Instances may mutate the key for internal usage, as desired. e.g.
2014 2018 instances may wish to prepend the repo name, introduce path
2015 2019 components for filesystem or URL addressing, etc. Behavior is up to
2016 2020 the cache.
2017 2021
2018 2022 Returns a bool indicating if the request is cacheable by this
2019 2023 instance.
2020 2024 """
2021 2025
2022 2026 def lookup():
2023 2027 """Attempt to resolve an entry in the cache.
2024 2028
2025 2029 The instance is instructed to look for the cache key that it was
2026 2030 informed about via the call to ``setcachekey()``.
2027 2031
2028 2032 If there's no cache hit or the cacher doesn't wish to use the cached
2029 2033 entry, ``None`` should be returned.
2030 2034
2031 2035 Else, a dict defining the cached result should be returned. The
2032 2036 dict may have the following keys:
2033 2037
2034 2038 objs
2035 2039 An iterable of objects that should be sent to the client. That
2036 2040 iterable of objects is expected to be what the command function
2037 2041 would return if invoked or an equivalent representation thereof.
2038 2042 """
2039 2043
2040 2044 def onobject(obj):
2041 2045 """Called when a new object is emitted from the command function.
2042 2046
2043 2047 Receives as its argument the object that was emitted from the
2044 2048 command function.
2045 2049
2046 2050 This method returns an iterator of objects to forward to the output
2047 2051 layer. The easiest implementation is a generator that just
2048 2052 ``yield obj``.
2049 2053 """
2050 2054
2051 2055 def onfinished():
2052 2056 """Called after all objects have been emitted from the command function.
2053 2057
2054 2058 Implementations should return an iterator of objects to forward to
2055 2059 the output layer.
2056 2060
2057 2061 This method can be a generator.
2058 2062 """
@@ -1,3995 +1,4000 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 # coding: utf-8
3 3 #
4 4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9
10 10 import functools
11 11 import os
12 12 import random
13 13 import re
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from concurrent import futures
19 19 from typing import (
20 20 Optional,
21 21 )
22 22
23 23 from .i18n import _
24 24 from .node import (
25 25 bin,
26 26 hex,
27 27 nullrev,
28 28 sha1nodeconstants,
29 29 short,
30 30 )
31 31 from .pycompat import (
32 32 delattr,
33 33 getattr,
34 34 )
35 35 from . import (
36 36 bookmarks,
37 37 branchmap,
38 38 bundle2,
39 39 bundlecaches,
40 40 changegroup,
41 41 color,
42 42 commit,
43 43 context,
44 44 dirstate,
45 45 discovery,
46 46 encoding,
47 47 error,
48 48 exchange,
49 49 extensions,
50 50 filelog,
51 51 hook,
52 52 lock as lockmod,
53 53 match as matchmod,
54 54 mergestate as mergestatemod,
55 55 mergeutil,
56 56 namespaces,
57 57 narrowspec,
58 58 obsolete,
59 59 pathutil,
60 60 phases,
61 61 pushkey,
62 62 pycompat,
63 63 rcutil,
64 64 repoview,
65 65 requirements as requirementsmod,
66 66 revlog,
67 67 revset,
68 68 revsetlang,
69 69 scmutil,
70 70 sparse,
71 71 store as storemod,
72 72 subrepoutil,
73 73 tags as tagsmod,
74 74 transaction,
75 75 txnutil,
76 76 util,
77 77 vfs as vfsmod,
78 78 wireprototypes,
79 79 )
80 80
81 81 from .interfaces import (
82 82 repository,
83 83 util as interfaceutil,
84 84 )
85 85
86 86 from .utils import (
87 87 hashutil,
88 88 procutil,
89 89 stringutil,
90 90 urlutil,
91 91 )
92 92
93 93 from .revlogutils import (
94 94 concurrency_checker as revlogchecker,
95 95 constants as revlogconst,
96 96 sidedata as sidedatamod,
97 97 )
98 98
99 99 release = lockmod.release
100 100 urlerr = util.urlerr
101 101 urlreq = util.urlreq
102 102
103 103 RE_SKIP_DIRSTATE_ROLLBACK = re.compile(
104 104 b"^((dirstate|narrowspec.dirstate).*|branch$)"
105 105 )
106 106
107 107 # set of (path, vfs-location) tuples. vfs-location is:
108 108 # - 'plain' for vfs relative paths
109 109 # - '' for svfs relative paths
110 110 _cachedfiles = set()
111 111
112 112
113 113 class _basefilecache(scmutil.filecache):
114 114 """All filecache usage on repo are done for logic that should be unfiltered"""
115 115
116 116 def __get__(self, repo, type=None):
117 117 if repo is None:
118 118 return self
119 119 # proxy to unfiltered __dict__ since filtered repo has no entry
120 120 unfi = repo.unfiltered()
121 121 try:
122 122 return unfi.__dict__[self.sname]
123 123 except KeyError:
124 124 pass
125 125 return super(_basefilecache, self).__get__(unfi, type)
126 126
127 127 def set(self, repo, value):
128 128 return super(_basefilecache, self).set(repo.unfiltered(), value)
129 129
130 130
131 131 class repofilecache(_basefilecache):
132 132 """filecache for files in .hg but outside of .hg/store"""
133 133
134 134 def __init__(self, *paths):
135 135 super(repofilecache, self).__init__(*paths)
136 136 for path in paths:
137 137 _cachedfiles.add((path, b'plain'))
138 138
139 139 def join(self, obj, fname):
140 140 return obj.vfs.join(fname)
141 141
142 142
143 143 class storecache(_basefilecache):
144 144 """filecache for files in the store"""
145 145
146 146 def __init__(self, *paths):
147 147 super(storecache, self).__init__(*paths)
148 148 for path in paths:
149 149 _cachedfiles.add((path, b''))
150 150
151 151 def join(self, obj, fname):
152 152 return obj.sjoin(fname)
153 153
154 154
155 155 class changelogcache(storecache):
156 156 """filecache for the changelog"""
157 157
158 158 def __init__(self):
159 159 super(changelogcache, self).__init__()
160 160 _cachedfiles.add((b'00changelog.i', b''))
161 161 _cachedfiles.add((b'00changelog.n', b''))
162 162
163 163 def tracked_paths(self, obj):
164 164 paths = [self.join(obj, b'00changelog.i')]
165 165 if obj.store.opener.options.get(b'persistent-nodemap', False):
166 166 paths.append(self.join(obj, b'00changelog.n'))
167 167 return paths
168 168
169 169
170 170 class manifestlogcache(storecache):
171 171 """filecache for the manifestlog"""
172 172
173 173 def __init__(self):
174 174 super(manifestlogcache, self).__init__()
175 175 _cachedfiles.add((b'00manifest.i', b''))
176 176 _cachedfiles.add((b'00manifest.n', b''))
177 177
178 178 def tracked_paths(self, obj):
179 179 paths = [self.join(obj, b'00manifest.i')]
180 180 if obj.store.opener.options.get(b'persistent-nodemap', False):
181 181 paths.append(self.join(obj, b'00manifest.n'))
182 182 return paths
183 183
184 184
185 185 class mixedrepostorecache(_basefilecache):
186 186 """filecache for a mix files in .hg/store and outside"""
187 187
188 188 def __init__(self, *pathsandlocations):
189 189 # scmutil.filecache only uses the path for passing back into our
190 190 # join(), so we can safely pass a list of paths and locations
191 191 super(mixedrepostorecache, self).__init__(*pathsandlocations)
192 192 _cachedfiles.update(pathsandlocations)
193 193
194 194 def join(self, obj, fnameandlocation):
195 195 fname, location = fnameandlocation
196 196 if location == b'plain':
197 197 return obj.vfs.join(fname)
198 198 else:
199 199 if location != b'':
200 200 raise error.ProgrammingError(
201 201 b'unexpected location: %s' % location
202 202 )
203 203 return obj.sjoin(fname)
204 204
205 205
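# A rough usage sketch (the property names below are illustrative, not taken
# from this diff): these decorators attach file-backed invalidation to
# repository properties, so a cached value is recomputed once the tracked
# file changes on disk.
#
#     class somerepo:
#         @repofilecache(b'bookmarks')   # tracks .hg/bookmarks
#         def _bookmarks(self):
#             return bookmarks.bmstore(self)
#
#         @storecache(b'obsstore')       # tracks .hg/store/obsstore
#         def obsstore(self):
#             return obsolete.makestore(self.ui, self)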
206 206 def isfilecached(repo, name):
207 207 """check if a repo has already cached "name" filecache-ed property
208 208
209 209 This returns (cachedobj-or-None, iscached) tuple.
210 210 """
211 211 cacheentry = repo.unfiltered()._filecache.get(name, None)
212 212 if not cacheentry:
213 213 return None, False
214 214 return cacheentry.obj, True
215 215
216 216
217 217 class unfilteredpropertycache(util.propertycache):
218 218 """propertycache that apply to unfiltered repo only"""
219 219
220 220 def __get__(self, repo, type=None):
221 221 unfi = repo.unfiltered()
222 222 if unfi is repo:
223 223 return super(unfilteredpropertycache, self).__get__(unfi)
224 224 return getattr(unfi, self.name)
225 225
226 226
227 227 class filteredpropertycache(util.propertycache):
228 228 """propertycache that must take filtering in account"""
229 229
230 230 def cachevalue(self, obj, value):
231 231 object.__setattr__(obj, self.name, value)
232 232
233 233
234 234 def hasunfilteredcache(repo, name):
235 235 """check if a repo has an unfilteredpropertycache value for <name>"""
236 236 return name in vars(repo.unfiltered())
237 237
238 238
239 239 def unfilteredmethod(orig):
240 240 """decorate method that always need to be run on unfiltered version"""
241 241
242 242 @functools.wraps(orig)
243 243 def wrapper(repo, *args, **kwargs):
244 244 return orig(repo.unfiltered(), *args, **kwargs)
245 245
246 246 return wrapper
247 247
248 248
249 249 moderncaps = {
250 250 b'lookup',
251 251 b'branchmap',
252 252 b'pushkey',
253 253 b'known',
254 254 b'getbundle',
255 255 b'unbundle',
256 256 }
257 257 legacycaps = moderncaps.union({b'changegroupsubset'})
258 258
259 259
260 260 @interfaceutil.implementer(repository.ipeercommandexecutor)
261 261 class localcommandexecutor:
262 262 def __init__(self, peer):
263 263 self._peer = peer
264 264 self._sent = False
265 265 self._closed = False
266 266
267 267 def __enter__(self):
268 268 return self
269 269
270 270 def __exit__(self, exctype, excvalue, exctb):
271 271 self.close()
272 272
273 273 def callcommand(self, command, args):
274 274 if self._sent:
275 275 raise error.ProgrammingError(
276 276 b'callcommand() cannot be used after sendcommands()'
277 277 )
278 278
279 279 if self._closed:
280 280 raise error.ProgrammingError(
281 281 b'callcommand() cannot be used after close()'
282 282 )
283 283
284 284 # We don't need to support anything fancy. Just call the named
285 285 # method on the peer and return a resolved future.
286 286 fn = getattr(self._peer, pycompat.sysstr(command))
287 287
288 288 f = futures.Future()
289 289
290 290 try:
291 291 result = fn(**pycompat.strkwargs(args))
292 292 except Exception:
293 293 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
294 294 else:
295 295 f.set_result(result)
296 296
297 297 return f
298 298
299 299 def sendcommands(self):
300 300 self._sent = True
301 301
302 302 def close(self):
303 303 self._closed = True
304 304
305 305
306 306 @interfaceutil.implementer(repository.ipeercommands)
307 307 class localpeer(repository.peer):
308 308 '''peer for a local repo; reflects only the most recent API'''
309 309
310 310 def __init__(self, repo, caps=None, path=None):
311 311 super(localpeer, self).__init__(repo.ui, path=path)
312 312
313 313 if caps is None:
314 314 caps = moderncaps.copy()
315 315 self._repo = repo.filtered(b'served')
316 316
317 317 if repo._wanted_sidedata:
318 318 formatted = bundle2.format_remote_wanted_sidedata(repo)
319 319 caps.add(b'exp-wanted-sidedata=' + formatted)
320 320
321 321 self._caps = repo._restrictcapabilities(caps)
322 322
323 323 # Begin of _basepeer interface.
324 324
325 325 def url(self):
326 326 return self._repo.url()
327 327
328 328 def local(self):
329 329 return self._repo
330 330
331 331 def canpush(self):
332 332 return True
333 333
334 334 def close(self):
335 335 self._repo.close()
336 336
337 337 # End of _basepeer interface.
338 338
339 339 # Begin of _basewirecommands interface.
340 340
341 341 def branchmap(self):
342 342 return self._repo.branchmap()
343 343
344 344 def capabilities(self):
345 345 return self._caps
346 346
347 347 def clonebundles(self):
348 348 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
349 349
350 350 def debugwireargs(self, one, two, three=None, four=None, five=None):
351 351 """Used to test argument passing over the wire"""
352 352 return b"%s %s %s %s %s" % (
353 353 one,
354 354 two,
355 355 pycompat.bytestr(three),
356 356 pycompat.bytestr(four),
357 357 pycompat.bytestr(five),
358 358 )
359 359
360 360 def getbundle(
361 361 self,
362 362 source,
363 363 heads=None,
364 364 common=None,
365 365 bundlecaps=None,
366 366 remote_sidedata=None,
367 367 **kwargs
368 368 ):
369 369 chunks = exchange.getbundlechunks(
370 370 self._repo,
371 371 source,
372 372 heads=heads,
373 373 common=common,
374 374 bundlecaps=bundlecaps,
375 375 remote_sidedata=remote_sidedata,
376 376 **kwargs
377 377 )[1]
378 378 cb = util.chunkbuffer(chunks)
379 379
380 380 if exchange.bundle2requested(bundlecaps):
381 381 # When requesting a bundle2, getbundle returns a stream to make the
382 382 # wire level function happier. We need to build a proper object
383 383 # from it in local peer.
384 384 return bundle2.getunbundler(self.ui, cb)
385 385 else:
386 386 return changegroup.getunbundler(b'01', cb, None)
387 387
388 388 def heads(self):
389 389 return self._repo.heads()
390 390
391 391 def known(self, nodes):
392 392 return self._repo.known(nodes)
393 393
394 394 def listkeys(self, namespace):
395 395 return self._repo.listkeys(namespace)
396 396
397 397 def lookup(self, key):
398 398 return self._repo.lookup(key)
399 399
400 400 def pushkey(self, namespace, key, old, new):
401 401 return self._repo.pushkey(namespace, key, old, new)
402 402
403 403 def stream_out(self):
404 404 raise error.Abort(_(b'cannot perform stream clone against local peer'))
405 405
406 406 def unbundle(self, bundle, heads, url):
407 407 """apply a bundle on a repo
408 408
409 409 This function handles the repo locking itself."""
410 410 try:
411 411 try:
412 412 bundle = exchange.readbundle(self.ui, bundle, None)
413 413 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
414 414 if util.safehasattr(ret, b'getchunks'):
415 415 # This is a bundle20 object, turn it into an unbundler.
416 416 # This little dance should be dropped eventually when the
417 417 # API is finally improved.
418 418 stream = util.chunkbuffer(ret.getchunks())
419 419 ret = bundle2.getunbundler(self.ui, stream)
420 420 return ret
421 421 except Exception as exc:
422 422 # If the exception contains output salvaged from a bundle2
423 423 # reply, we need to make sure it is printed before continuing
424 424 # to fail. So we build a bundle2 with such output and consume
425 425 # it directly.
426 426 #
427 427 # This is not very elegant but allows a "simple" solution for
428 428 # issue4594
429 429 output = getattr(exc, '_bundle2salvagedoutput', ())
430 430 if output:
431 431 bundler = bundle2.bundle20(self._repo.ui)
432 432 for out in output:
433 433 bundler.addpart(out)
434 434 stream = util.chunkbuffer(bundler.getchunks())
435 435 b = bundle2.getunbundler(self.ui, stream)
436 436 bundle2.processbundle(self._repo, b)
437 437 raise
438 438 except error.PushRaced as exc:
439 439 raise error.ResponseError(
440 440 _(b'push failed:'), stringutil.forcebytestr(exc)
441 441 )
442 442
443 443 # End of _basewirecommands interface.
444 444
445 445 # Begin of peer interface.
446 446
447 447 def commandexecutor(self):
448 448 return localcommandexecutor(self)
449 449
450 450 # End of peer interface.
451 451
452 452
453 453 @interfaceutil.implementer(repository.ipeerlegacycommands)
454 454 class locallegacypeer(localpeer):
455 455 """peer extension which implements legacy methods too; used for tests with
456 456 restricted capabilities"""
457 457
458 458 def __init__(self, repo, path=None):
459 459 super(locallegacypeer, self).__init__(repo, caps=legacycaps, path=path)
460 460
461 461 # Begin of baselegacywirecommands interface.
462 462
463 463 def between(self, pairs):
464 464 return self._repo.between(pairs)
465 465
466 466 def branches(self, nodes):
467 467 return self._repo.branches(nodes)
468 468
469 469 def changegroup(self, nodes, source):
470 470 outgoing = discovery.outgoing(
471 471 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
472 472 )
473 473 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
474 474
475 475 def changegroupsubset(self, bases, heads, source):
476 476 outgoing = discovery.outgoing(
477 477 self._repo, missingroots=bases, ancestorsof=heads
478 478 )
479 479 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
480 480
481 481 # End of baselegacywirecommands interface.
482 482
483 483
484 484 # Functions receiving (ui, features) that extensions can register to impact
485 485 # the ability to load repositories with custom requirements. Only
486 486 # functions defined in loaded extensions are called.
487 487 #
488 488 # The function receives a set of requirement strings that the repository
489 489 # is capable of opening. Functions will typically add elements to the
490 490 # set to reflect that the extension knows how to handle those requirements.
491 491 featuresetupfuncs = set()
492 492
493 493
494 494 def _getsharedvfs(hgvfs, requirements):
495 495 """returns the vfs object pointing to root of shared source
496 496 repo for a shared repository
497 497
498 498 hgvfs is vfs pointing at .hg/ of current repo (shared one)
499 499 requirements is a set of requirements of current repo (shared one)
500 500 """
501 501 # The ``shared`` or ``relshared`` requirements indicate the
502 502 # store lives in the path contained in the ``.hg/sharedpath`` file.
503 503 # This is an absolute path for ``shared`` and relative to
504 504 # ``.hg/`` for ``relshared``.
505 505 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
506 506 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
507 507 sharedpath = util.normpath(hgvfs.join(sharedpath))
508 508
509 509 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
510 510
511 511 if not sharedvfs.exists():
512 512 raise error.RepoError(
513 513 _(b'.hg/sharedpath points to nonexistent directory %s')
514 514 % sharedvfs.base
515 515 )
516 516 return sharedvfs
517 517
518 518
519 519 def _readrequires(vfs, allowmissing):
520 520 """reads the require file present at root of this vfs
521 521 and return a set of requirements
522 522
523 523 If allowmissing is True, we suppress FileNotFoundError if raised"""
524 524 # requires file contains a newline-delimited list of
525 525 # features/capabilities the opener (us) must have in order to use
526 526 # the repository. This file was introduced in Mercurial 0.9.2,
527 527 # which means very old repositories may not have one. We assume
528 528 # a missing file translates to no requirements.
529 529 read = vfs.tryread if allowmissing else vfs.read
530 530 return set(read(b'requires').splitlines())
531 531
532 532
533 533 def makelocalrepository(baseui, path: bytes, intents=None):
534 534 """Create a local repository object.
535 535
536 536 Given arguments needed to construct a local repository, this function
537 537 performs various early repository loading functionality (such as
538 538 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
539 539 the repository can be opened, derives a type suitable for representing
540 540 that repository, and returns an instance of it.
541 541
542 542 The returned object conforms to the ``repository.completelocalrepository``
543 543 interface.
544 544
545 545 The repository type is derived by calling a series of factory functions
546 546 for each aspect/interface of the final repository. These are defined by
547 547 ``REPO_INTERFACES``.
548 548
549 549 Each factory function is called to produce a type implementing a specific
550 550 interface. The cumulative list of returned types will be combined into a
551 551 new type and that type will be instantiated to represent the local
552 552 repository.
553 553
554 554 The factory functions each receive various state that may be consulted
555 555 as part of deriving a type.
556 556
557 557 Extensions should wrap these factory functions to customize repository type
558 558 creation. Note that an extension's wrapped function may be called even if
559 559 that extension is not loaded for the repo being constructed. Extensions
560 560 should check if their ``__name__`` appears in the
561 561 ``extensionmodulenames`` set passed to the factory function and no-op if
562 562 not.
563 563 """
564 564 ui = baseui.copy()
565 565 # Prevent copying repo configuration.
566 566 ui.copy = baseui.copy
567 567
568 568 # Working directory VFS rooted at repository root.
569 569 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
570 570
571 571 # Main VFS for .hg/ directory.
572 572 hgpath = wdirvfs.join(b'.hg')
573 573 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
574 574 # Whether this repository is a shared one or not
575 575 shared = False
576 576 # If this repository is shared, vfs pointing to shared repo
577 577 sharedvfs = None
578 578
579 579 # The .hg/ path should exist and should be a directory. All other
580 580 # cases are errors.
581 581 if not hgvfs.isdir():
582 582 try:
583 583 hgvfs.stat()
584 584 except FileNotFoundError:
585 585 pass
586 586 except ValueError as e:
587 587 # Can be raised on Python 3.8 when path is invalid.
588 588 raise error.Abort(
589 589 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
590 590 )
591 591
592 592 raise error.RepoError(_(b'repository %s not found') % path)
593 593
594 594 requirements = _readrequires(hgvfs, True)
595 595 shared = (
596 596 requirementsmod.SHARED_REQUIREMENT in requirements
597 597 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
598 598 )
599 599 storevfs = None
600 600 if shared:
601 601 # This is a shared repo
602 602 sharedvfs = _getsharedvfs(hgvfs, requirements)
603 603 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
604 604 else:
605 605 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
606 606
607 607 # if .hg/requires contains the sharesafe requirement, it means
608 608 # there exists a `.hg/store/requires` too and we should read it
609 609 # NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
610 610 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
611 611 # is not present, refer checkrequirementscompat() for that
612 612 #
613 613 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
614 614 # repository was shared the old way. We check the share source .hg/requires
615 615 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
616 616 # to be reshared
617 617 hint = _(b"see `hg help config.format.use-share-safe` for more information")
618 618 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
619 619 if (
620 620 shared
621 621 and requirementsmod.SHARESAFE_REQUIREMENT
622 622 not in _readrequires(sharedvfs, True)
623 623 ):
624 624 mismatch_warn = ui.configbool(
625 625 b'share', b'safe-mismatch.source-not-safe.warn'
626 626 )
627 627 mismatch_config = ui.config(
628 628 b'share', b'safe-mismatch.source-not-safe'
629 629 )
630 630 mismatch_verbose_upgrade = ui.configbool(
631 631 b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
632 632 )
633 633 if mismatch_config in (
634 634 b'downgrade-allow',
635 635 b'allow',
636 636 b'downgrade-abort',
637 637 ):
638 638 # prevent cyclic import localrepo -> upgrade -> localrepo
639 639 from . import upgrade
640 640
641 641 upgrade.downgrade_share_to_non_safe(
642 642 ui,
643 643 hgvfs,
644 644 sharedvfs,
645 645 requirements,
646 646 mismatch_config,
647 647 mismatch_warn,
648 648 mismatch_verbose_upgrade,
649 649 )
650 650 elif mismatch_config == b'abort':
651 651 raise error.Abort(
652 652 _(b"share source does not support share-safe requirement"),
653 653 hint=hint,
654 654 )
655 655 else:
656 656 raise error.Abort(
657 657 _(
658 658 b"share-safe mismatch with source.\nUnrecognized"
659 659 b" value '%s' of `share.safe-mismatch.source-not-safe`"
660 660 b" set."
661 661 )
662 662 % mismatch_config,
663 663 hint=hint,
664 664 )
665 665 else:
666 666 requirements |= _readrequires(storevfs, False)
667 667 elif shared:
668 668 sourcerequires = _readrequires(sharedvfs, False)
669 669 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
670 670 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
671 671 mismatch_warn = ui.configbool(
672 672 b'share', b'safe-mismatch.source-safe.warn'
673 673 )
674 674 mismatch_verbose_upgrade = ui.configbool(
675 675 b'share', b'safe-mismatch.source-safe:verbose-upgrade'
676 676 )
677 677 if mismatch_config in (
678 678 b'upgrade-allow',
679 679 b'allow',
680 680 b'upgrade-abort',
681 681 ):
682 682 # prevent cyclic import localrepo -> upgrade -> localrepo
683 683 from . import upgrade
684 684
685 685 upgrade.upgrade_share_to_safe(
686 686 ui,
687 687 hgvfs,
688 688 storevfs,
689 689 requirements,
690 690 mismatch_config,
691 691 mismatch_warn,
692 692 mismatch_verbose_upgrade,
693 693 )
694 694 elif mismatch_config == b'abort':
695 695 raise error.Abort(
696 696 _(
697 697 b'version mismatch: source uses share-safe'
698 698 b' functionality while the current share does not'
699 699 ),
700 700 hint=hint,
701 701 )
702 702 else:
703 703 raise error.Abort(
704 704 _(
705 705 b"share-safe mismatch with source.\nUnrecognized"
706 706 b" value '%s' of `share.safe-mismatch.source-safe` set."
707 707 )
708 708 % mismatch_config,
709 709 hint=hint,
710 710 )
711 711
712 712 # The .hg/hgrc file may load extensions or contain config options
713 713 # that influence repository construction. Attempt to load it and
714 714 # process any new extensions that it may have pulled in.
715 715 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
716 716 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
717 717 extensions.loadall(ui)
718 718 extensions.populateui(ui)
719 719
720 720 # Set of module names of extensions loaded for this repository.
721 721 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
722 722
723 723 supportedrequirements = gathersupportedrequirements(ui)
724 724
725 725 # We first validate the requirements are known.
726 726 ensurerequirementsrecognized(requirements, supportedrequirements)
727 727
728 728 # Then we validate that the known set is reasonable to use together.
729 729 ensurerequirementscompatible(ui, requirements)
730 730
731 731 # TODO there are unhandled edge cases related to opening repositories with
732 732 # shared storage. If storage is shared, we should also test for requirements
733 733 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
734 734 # that repo, as that repo may load extensions needed to open it. This is a
735 735 # bit complicated because we don't want the other hgrc to overwrite settings
736 736 # in this hgrc.
737 737 #
738 738 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
739 739 # file when sharing repos. But if a requirement is added after the share is
740 740 # performed, thereby introducing a new requirement for the opener, we may
741 741 # not see that and could encounter a run-time error interacting with
742 742 # that shared store since it has an unknown-to-us requirement.
743 743
744 744 # At this point, we know we should be capable of opening the repository.
745 745 # Now get on with doing that.
746 746
747 747 features = set()
748 748
749 749 # The "store" part of the repository holds versioned data. How it is
750 750 # accessed is determined by various requirements. If `shared` or
751 751 # `relshared` requirements are present, this indicates current repository
752 752 # is a share and store exists in path mentioned in `.hg/sharedpath`
753 753 if shared:
754 754 storebasepath = sharedvfs.base
755 755 cachepath = sharedvfs.join(b'cache')
756 756 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
757 757 else:
758 758 storebasepath = hgvfs.base
759 759 cachepath = hgvfs.join(b'cache')
760 760 wcachepath = hgvfs.join(b'wcache')
761 761
762 762 # The store has changed over time and the exact layout is dictated by
763 763 # requirements. The store interface abstracts differences across all
764 764 # of them.
765 765 store = makestore(
766 766 requirements,
767 767 storebasepath,
768 768 lambda base: vfsmod.vfs(base, cacheaudited=True),
769 769 )
770 770 hgvfs.createmode = store.createmode
771 771
772 772 storevfs = store.vfs
773 773 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
774 774
775 775 if (
776 776 requirementsmod.REVLOGV2_REQUIREMENT in requirements
777 777 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
778 778 ):
779 779 features.add(repository.REPO_FEATURE_SIDE_DATA)
780 780 # the revlogv2 docket introduced race condition that we need to fix
781 781 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
782 782
783 783 # The cache vfs is used to manage cache files.
784 784 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
785 785 cachevfs.createmode = store.createmode
786 786 # The cache vfs is used to manage cache files related to the working copy
787 787 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
788 788 wcachevfs.createmode = store.createmode
789 789
790 790 # Now resolve the type for the repository object. We do this by repeatedly
791 791 # calling a factory function to produce types for specific aspects of the
792 792 # repo's operation. The aggregate returned types are used as base classes
793 793 # for a dynamically-derived type, which will represent our new repository.
794 794
795 795 bases = []
796 796 extrastate = {}
797 797
798 798 for iface, fn in REPO_INTERFACES:
799 799 # We pass all potentially useful state to give extensions tons of
800 800 # flexibility.
801 801 typ = fn()(
802 802 ui=ui,
803 803 intents=intents,
804 804 requirements=requirements,
805 805 features=features,
806 806 wdirvfs=wdirvfs,
807 807 hgvfs=hgvfs,
808 808 store=store,
809 809 storevfs=storevfs,
810 810 storeoptions=storevfs.options,
811 811 cachevfs=cachevfs,
812 812 wcachevfs=wcachevfs,
813 813 extensionmodulenames=extensionmodulenames,
814 814 extrastate=extrastate,
815 815 baseclasses=bases,
816 816 )
817 817
818 818 if not isinstance(typ, type):
819 819 raise error.ProgrammingError(
820 820 b'unable to construct type for %s' % iface
821 821 )
822 822
823 823 bases.append(typ)
824 824
825 825 # type() allows you to use characters in type names that wouldn't be
826 826 # recognized as Python symbols in source code. We abuse that to add
827 827 # rich information about our constructed repo.
828 828 name = pycompat.sysstr(
829 829 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
830 830 )
831 831
832 832 cls = type(name, tuple(bases), {})
833 833
834 834 return cls(
835 835 baseui=baseui,
836 836 ui=ui,
837 837 origroot=path,
838 838 wdirvfs=wdirvfs,
839 839 hgvfs=hgvfs,
840 840 requirements=requirements,
841 841 supportedrequirements=supportedrequirements,
842 842 sharedpath=storebasepath,
843 843 store=store,
844 844 cachevfs=cachevfs,
845 845 wcachevfs=wcachevfs,
846 846 features=features,
847 847 intents=intents,
848 848 )
849 849
850 850
851 851 def loadhgrc(
852 852 ui,
853 853 wdirvfs: vfsmod.vfs,
854 854 hgvfs: vfsmod.vfs,
855 855 requirements,
856 856 sharedvfs: Optional[vfsmod.vfs] = None,
857 857 ):
858 858 """Load hgrc files/content into a ui instance.
859 859
860 860 This is called during repository opening to load any additional
861 861 config files or settings relevant to the current repository.
862 862
863 863 Returns a bool indicating whether any additional configs were loaded.
864 864
865 865 Extensions should monkeypatch this function to modify how per-repo
866 866 configs are loaded. For example, an extension may wish to pull in
867 867 configs from alternate files or sources.
868 868
869 869 sharedvfs is vfs object pointing to source repo if the current one is a
870 870 shared one
871 871 """
872 872 if not rcutil.use_repo_hgrc():
873 873 return False
874 874
875 875 ret = False
876 876 # first load config from shared source if we have to
877 877 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
878 878 try:
879 879 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
880 880 ret = True
881 881 except IOError:
882 882 pass
883 883
884 884 try:
885 885 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
886 886 ret = True
887 887 except IOError:
888 888 pass
889 889
890 890 try:
891 891 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
892 892 ret = True
893 893 except IOError:
894 894 pass
895 895
896 896 return ret
897 897
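# A rough extension sketch: the docstring above suggests monkeypatching this
# function; with the standard wrapper mechanism that could look like the
# following (the extra config file name is hypothetical):
#
#     def wrapped_loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, *args, **kwargs):
#         loaded = orig(ui, wdirvfs, hgvfs, requirements, *args, **kwargs)
#         try:
#             ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
#             loaded = True
#         except IOError:
#             pass
#         return loaded
#
#     extensions.wrapfunction(localrepo, 'loadhgrc', wrapped_loadhgrc)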
898 898
899 899 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
900 900 """Perform additional actions after .hg/hgrc is loaded.
901 901
902 902 This function is called during repository loading immediately after
903 903 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
904 904
905 905 The function can be used to validate configs, automatically add
906 906 options (including extensions) based on requirements, etc.
907 907 """
908 908
909 909 # Map of requirements to list of extensions to load automatically when
910 910 # requirement is present.
911 911 autoextensions = {
912 912 b'git': [b'git'],
913 913 b'largefiles': [b'largefiles'],
914 914 b'lfs': [b'lfs'],
915 915 }
916 916
917 917 for requirement, names in sorted(autoextensions.items()):
918 918 if requirement not in requirements:
919 919 continue
920 920
921 921 for name in names:
922 922 if not ui.hasconfig(b'extensions', name):
923 923 ui.setconfig(b'extensions', name, b'', source=b'autoload')
924 924
925 925
926 926 def gathersupportedrequirements(ui):
927 927 """Determine the complete set of recognized requirements."""
928 928 # Start with all requirements supported by this file.
929 929 supported = set(localrepository._basesupported)
930 930
931 931 # Execute ``featuresetupfuncs`` entries if they belong to an extension
932 932 # relevant to this ui instance.
933 933 modules = {m.__name__ for n, m in extensions.extensions(ui)}
934 934
935 935 for fn in featuresetupfuncs:
936 936 if fn.__module__ in modules:
937 937 fn(ui, supported)
938 938
939 939 # Add derived requirements from registered compression engines.
940 940 for name in util.compengines:
941 941 engine = util.compengines[name]
942 942 if engine.available() and engine.revlogheader():
943 943 supported.add(b'exp-compression-%s' % name)
944 944 if engine.name() == b'zstd':
945 945 supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)
946 946
947 947 return supported
948 948
949 949
950 950 def ensurerequirementsrecognized(requirements, supported):
951 951 """Validate that a set of local requirements is recognized.
952 952
953 953 Receives a set of requirements. Raises an ``error.RepoError`` if there
954 954 exists any requirement in that set that currently loaded code doesn't
955 955 recognize.
956 956
957 957 Returns a set of supported requirements.
958 958 """
959 959 missing = set()
960 960
961 961 for requirement in requirements:
962 962 if requirement in supported:
963 963 continue
964 964
965 965 if not requirement or not requirement[0:1].isalnum():
966 966 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
967 967
968 968 missing.add(requirement)
969 969
970 970 if missing:
971 971 raise error.RequirementError(
972 972 _(b'repository requires features unknown to this Mercurial: %s')
973 973 % b' '.join(sorted(missing)),
974 974 hint=_(
975 975 b'see https://mercurial-scm.org/wiki/MissingRequirement '
976 976 b'for more information'
977 977 ),
978 978 )
979 979
980 980
981 981 def ensurerequirementscompatible(ui, requirements):
982 982 """Validates that a set of recognized requirements is mutually compatible.
983 983
984 984 Some requirements may not be compatible with others or require
985 985 config options that aren't enabled. This function is called during
986 986 repository opening to ensure that the set of requirements needed
987 987 to open a repository is sane and compatible with config options.
988 988
989 989 Extensions can monkeypatch this function to perform additional
990 990 checking.
991 991
992 992 ``error.RepoError`` should be raised on failure.
993 993 """
994 994 if (
995 995 requirementsmod.SPARSE_REQUIREMENT in requirements
996 996 and not sparse.enabled
997 997 ):
998 998 raise error.RepoError(
999 999 _(
1000 1000 b'repository is using sparse feature but '
1001 1001 b'sparse is not enabled; enable the '
1002 1002 b'"sparse" extensions to access'
1003 1003 )
1004 1004 )
1005 1005
1006 1006
1007 1007 def makestore(requirements, path, vfstype):
1008 1008 """Construct a storage object for a repository."""
1009 1009 if requirementsmod.STORE_REQUIREMENT in requirements:
1010 1010 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
1011 1011 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
1012 1012 return storemod.fncachestore(path, vfstype, dotencode)
1013 1013
1014 1014 return storemod.encodedstore(path, vfstype)
1015 1015
1016 1016 return storemod.basicstore(path, vfstype)
1017 1017
1018 1018
1019 1019 def resolvestorevfsoptions(ui, requirements, features):
1020 1020 """Resolve the options to pass to the store vfs opener.
1021 1021
1022 1022 The returned dict is used to influence behavior of the storage layer.
1023 1023 """
1024 1024 options = {}
1025 1025
1026 1026 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
1027 1027 options[b'treemanifest'] = True
1028 1028
1029 1029 # experimental config: format.manifestcachesize
1030 1030 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
1031 1031 if manifestcachesize is not None:
1032 1032 options[b'manifestcachesize'] = manifestcachesize
1033 1033
1034 1034 # In the absence of another requirement superseding a revlog-related
1035 1035 # requirement, we have to assume the repo is using revlog version 0.
1036 1036 # This revlog format is super old and we don't bother trying to parse
1037 1037 # opener options for it because those options wouldn't do anything
1038 1038 # meaningful on such old repos.
1039 1039 if (
1040 1040 requirementsmod.REVLOGV1_REQUIREMENT in requirements
1041 1041 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
1042 1042 ):
1043 1043 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
1044 1044 else: # explicitly mark repo as using revlogv0
1045 1045 options[b'revlogv0'] = True
1046 1046
1047 1047 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1048 1048 options[b'copies-storage'] = b'changeset-sidedata'
1049 1049 else:
1050 1050 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1051 1051 copiesextramode = (b'changeset-only', b'compatibility')
1052 1052 if writecopiesto in copiesextramode:
1053 1053 options[b'copies-storage'] = b'extra'
1054 1054
1055 1055 return options
1056 1056
1057 1057
1058 1058 def resolverevlogstorevfsoptions(ui, requirements, features):
1059 1059 """Resolve opener options specific to revlogs."""
1060 1060
1061 1061 options = {}
1062 1062 options[b'flagprocessors'] = {}
1063 1063
1064 1064 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1065 1065 options[b'revlogv1'] = True
1066 1066 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1067 1067 options[b'revlogv2'] = True
1068 1068 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1069 1069 options[b'changelogv2'] = True
1070 1070 cmp_rank = ui.configbool(b'experimental', b'changelog-v2.compute-rank')
1071 1071 options[b'changelogv2.compute-rank'] = cmp_rank
1072 1072
1073 1073 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1074 1074 options[b'generaldelta'] = True
1075 1075
1076 1076 # experimental config: format.chunkcachesize
1077 1077 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1078 1078 if chunkcachesize is not None:
1079 1079 options[b'chunkcachesize'] = chunkcachesize
1080 1080
1081 1081 deltabothparents = ui.configbool(
1082 1082 b'storage', b'revlog.optimize-delta-parent-choice'
1083 1083 )
1084 1084 options[b'deltabothparents'] = deltabothparents
1085 1085 dps_cgds = ui.configint(
1086 1086 b'storage',
1087 1087 b'revlog.delta-parent-search.candidate-group-chunk-size',
1088 1088 )
1089 1089 options[b'delta-parent-search.candidate-group-chunk-size'] = dps_cgds
1090 1090 options[b'debug-delta'] = ui.configbool(b'debug', b'revlog.debug-delta')
1091 1091
1092 1092 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
1093 1093 options[b'issue6528.fix-incoming'] = issue6528
1094 1094
1095 1095 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1096 1096 lazydeltabase = False
1097 1097 if lazydelta:
1098 1098 lazydeltabase = ui.configbool(
1099 1099 b'storage', b'revlog.reuse-external-delta-parent'
1100 1100 )
1101 1101 if lazydeltabase is None:
1102 1102 lazydeltabase = not scmutil.gddeltaconfig(ui)
1103 1103 options[b'lazydelta'] = lazydelta
1104 1104 options[b'lazydeltabase'] = lazydeltabase
1105 1105
1106 1106 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1107 1107 if 0 <= chainspan:
1108 1108 options[b'maxdeltachainspan'] = chainspan
1109 1109
1110 1110 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1111 1111 if mmapindexthreshold is not None:
1112 1112 options[b'mmapindexthreshold'] = mmapindexthreshold
1113 1113
1114 1114 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1115 1115 srdensitythres = float(
1116 1116 ui.config(b'experimental', b'sparse-read.density-threshold')
1117 1117 )
1118 1118 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1119 1119 options[b'with-sparse-read'] = withsparseread
1120 1120 options[b'sparse-read-density-threshold'] = srdensitythres
1121 1121 options[b'sparse-read-min-gap-size'] = srmingapsize
1122 1122
1123 1123 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1124 1124 options[b'sparse-revlog'] = sparserevlog
1125 1125 if sparserevlog:
1126 1126 options[b'generaldelta'] = True
1127 1127
1128 1128 maxchainlen = None
1129 1129 if sparserevlog:
1130 1130 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1131 1131 # experimental config: format.maxchainlen
1132 1132 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1133 1133 if maxchainlen is not None:
1134 1134 options[b'maxchainlen'] = maxchainlen
1135 1135
1136 1136 for r in requirements:
1137 1137 # we allow multiple compression engine requirements to co-exist because,
1138 1138 # strictly speaking, revlog seems to support mixed compression styles.
1139 1139 #
1140 1140 # The compression used for new entries will be "the last one"
1141 1141 prefix = r.startswith
1142 1142 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1143 1143 options[b'compengine'] = r.split(b'-', 2)[2]
1144 1144
1145 1145 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1146 1146 if options[b'zlib.level'] is not None:
1147 1147 if not (0 <= options[b'zlib.level'] <= 9):
1148 1148 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1149 1149 raise error.Abort(msg % options[b'zlib.level'])
1150 1150 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1151 1151 if options[b'zstd.level'] is not None:
1152 1152 if not (0 <= options[b'zstd.level'] <= 22):
1153 1153 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1154 1154 raise error.Abort(msg % options[b'zstd.level'])
1155 1155
1156 1156 if requirementsmod.NARROW_REQUIREMENT in requirements:
1157 1157 options[b'enableellipsis'] = True
1158 1158
1159 1159 if ui.configbool(b'experimental', b'rust.index'):
1160 1160 options[b'rust.index'] = True
1161 1161 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1162 1162 slow_path = ui.config(
1163 1163 b'storage', b'revlog.persistent-nodemap.slow-path'
1164 1164 )
1165 1165 if slow_path not in (b'allow', b'warn', b'abort'):
1166 1166 default = ui.config_default(
1167 1167 b'storage', b'revlog.persistent-nodemap.slow-path'
1168 1168 )
1169 1169 msg = _(
1170 1170 b'unknown value for config '
1171 1171 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1172 1172 )
1173 1173 ui.warn(msg % slow_path)
1174 1174 if not ui.quiet:
1175 1175 ui.warn(_(b'falling back to default value: %s\n') % default)
1176 1176 slow_path = default
1177 1177
1178 1178 msg = _(
1179 1179 b"accessing `persistent-nodemap` repository without associated "
1180 1180 b"fast implementation."
1181 1181 )
1182 1182 hint = _(
1183 1183 b"check `hg help config.format.use-persistent-nodemap` "
1184 1184 b"for details"
1185 1185 )
1186 1186 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1187 1187 if slow_path == b'warn':
1188 1188 msg = b"warning: " + msg + b'\n'
1189 1189 ui.warn(msg)
1190 1190 if not ui.quiet:
1191 1191 hint = b'(' + hint + b')\n'
1192 1192 ui.warn(hint)
1193 1193 if slow_path == b'abort':
1194 1194 raise error.Abort(msg, hint=hint)
1195 1195 options[b'persistent-nodemap'] = True
1196 1196 if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
1197 1197 slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
1198 1198 if slow_path not in (b'allow', b'warn', b'abort'):
1199 1199 default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
1200 1200 msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
1201 1201 ui.warn(msg % slow_path)
1202 1202 if not ui.quiet:
1203 1203 ui.warn(_(b'falling back to default value: %s\n') % default)
1204 1204 slow_path = default
1205 1205
1206 1206 msg = _(
1207 1207 b"accessing `dirstate-v2` repository without associated "
1208 1208 b"fast implementation."
1209 1209 )
1210 1210 hint = _(
1211 1211 b"check `hg help config.format.use-dirstate-v2` " b"for details"
1212 1212 )
1213 1213 if not dirstate.HAS_FAST_DIRSTATE_V2:
1214 1214 if slow_path == b'warn':
1215 1215 msg = b"warning: " + msg + b'\n'
1216 1216 ui.warn(msg)
1217 1217 if not ui.quiet:
1218 1218 hint = b'(' + hint + b')\n'
1219 1219 ui.warn(hint)
1220 1220 if slow_path == b'abort':
1221 1221 raise error.Abort(msg, hint=hint)
1222 1222 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1223 1223 options[b'persistent-nodemap.mmap'] = True
1224 1224 if ui.configbool(b'devel', b'persistent-nodemap'):
1225 1225 options[b'devel-force-nodemap'] = True
1226 1226
1227 1227 return options
1228 1228
1229 1229
1230 1230 def makemain(**kwargs):
1231 1231 """Produce a type conforming to ``ilocalrepositorymain``."""
1232 1232 return localrepository
1233 1233
1234 1234
1235 1235 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1236 1236 class revlogfilestorage:
1237 1237 """File storage when using revlogs."""
1238 1238
1239 1239 def file(self, path):
1240 1240 if path.startswith(b'/'):
1241 1241 path = path[1:]
1242 1242
1243 1243 return filelog.filelog(self.svfs, path)
1244 1244
1245 1245
1246 1246 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1247 1247 class revlognarrowfilestorage:
1248 1248 """File storage when using revlogs and narrow files."""
1249 1249
1250 1250 def file(self, path):
1251 1251 if path.startswith(b'/'):
1252 1252 path = path[1:]
1253 1253
1254 1254 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1255 1255
1256 1256
1257 1257 def makefilestorage(requirements, features, **kwargs):
1258 1258 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1259 1259 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1260 1260 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1261 1261
1262 1262 if requirementsmod.NARROW_REQUIREMENT in requirements:
1263 1263 return revlognarrowfilestorage
1264 1264 else:
1265 1265 return revlogfilestorage
1266 1266
1267 1267
1268 1268 # List of repository interfaces and factory functions for them. Each
1269 1269 # will be called in order during ``makelocalrepository()`` to iteratively
1270 1270 # derive the final type for a local repository instance. We capture the
1271 1271 # function as a lambda so we don't hold a reference and the module-level
1272 1272 # functions can be wrapped.
1273 1273 REPO_INTERFACES = [
1274 1274 (repository.ilocalrepositorymain, lambda: makemain),
1275 1275 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1276 1276 ]
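# As a rough sketch (not the actual ``makelocalrepository()`` code, which
# lives earlier in this module), the composition described above amounts to
# collecting one base class per factory and deriving the final type
# dynamically, something like:
#
#     bases = [fn()(requirements=requirements, features=features)
#              for _, fn in REPO_INTERFACES]
#     cls = type('derivedrepo', tuple(bases), {})
#
# The exact keyword arguments each factory receives are defined by
# ``makelocalrepository()``; the snippet only conveys the idea.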
1277 1277
1278 1278
1279 1279 @interfaceutil.implementer(repository.ilocalrepositorymain)
1280 1280 class localrepository:
1281 1281 """Main class for representing local repositories.
1282 1282
1283 1283 All local repositories are instances of this class.
1284 1284
1285 1285 Constructed on its own, instances of this class are not usable as
1286 1286 repository objects. To obtain a usable repository object, call
1287 1287 ``hg.repository()``, ``localrepo.instance()``, or
1288 1288 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1289 1289 ``instance()`` adds support for creating new repositories.
1290 1290 ``hg.repository()`` adds more extension integration, including calling
1291 1291 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1292 1292 used.
1293 1293 """
1294 1294
1295 1295 _basesupported = {
1296 1296 requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
1297 1297 requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
1298 1298 requirementsmod.CHANGELOGV2_REQUIREMENT,
1299 1299 requirementsmod.COPIESSDC_REQUIREMENT,
1300 1300 requirementsmod.DIRSTATE_TRACKED_HINT_V1,
1301 1301 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1302 1302 requirementsmod.DOTENCODE_REQUIREMENT,
1303 1303 requirementsmod.FNCACHE_REQUIREMENT,
1304 1304 requirementsmod.GENERALDELTA_REQUIREMENT,
1305 1305 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1306 1306 requirementsmod.NODEMAP_REQUIREMENT,
1307 1307 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1308 1308 requirementsmod.REVLOGV1_REQUIREMENT,
1309 1309 requirementsmod.REVLOGV2_REQUIREMENT,
1310 1310 requirementsmod.SHARED_REQUIREMENT,
1311 1311 requirementsmod.SHARESAFE_REQUIREMENT,
1312 1312 requirementsmod.SPARSE_REQUIREMENT,
1313 1313 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1314 1314 requirementsmod.STORE_REQUIREMENT,
1315 1315 requirementsmod.TREEMANIFEST_REQUIREMENT,
1316 1316 }
1317 1317
1318 1318 # list of prefixes for files which can be written without 'wlock'
1319 1319 # Extensions should extend this list when needed
1320 1320 _wlockfreeprefix = {
1321 1321 # We might consider requiring 'wlock' for the next
1322 1322 # two, but pretty much all the existing code assumes
1323 1323 # wlock is not needed so we keep them excluded for
1324 1324 # now.
1325 1325 b'hgrc',
1326 1326 b'requires',
1327 1327 # XXX cache is a complicated business; someone
1328 1328 # should investigate this in depth at some point
1329 1329 b'cache/',
1330 1330 # XXX bisect was still a bit too messy at the time
1331 1331 # this changeset was introduced. Someone should fix
1332 1332 # the remaining bit and drop this line
1333 1333 b'bisect.state',
1334 1334 }
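# A hypothetical extension maintaining its own lock-free state file could
# extend the set above from its reposetup, e.g.:
#
#     repo._wlockfreeprefix = repo._wlockfreeprefix | {b'myext-state'}
#
# (the file name is a placeholder; nothing in core registers it)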
1335 1335
1336 1336 def __init__(
1337 1337 self,
1338 1338 baseui,
1339 1339 ui,
1340 1340 origroot: bytes,
1341 1341 wdirvfs: vfsmod.vfs,
1342 1342 hgvfs: vfsmod.vfs,
1343 1343 requirements,
1344 1344 supportedrequirements,
1345 1345 sharedpath: bytes,
1346 1346 store,
1347 1347 cachevfs: vfsmod.vfs,
1348 1348 wcachevfs: vfsmod.vfs,
1349 1349 features,
1350 1350 intents=None,
1351 1351 ):
1352 1352 """Create a new local repository instance.
1353 1353
1354 1354 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1355 1355 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1356 1356 object.
1357 1357
1358 1358 Arguments:
1359 1359
1360 1360 baseui
1361 1361 ``ui.ui`` instance that ``ui`` argument was based off of.
1362 1362
1363 1363 ui
1364 1364 ``ui.ui`` instance for use by the repository.
1365 1365
1366 1366 origroot
1367 1367 ``bytes`` path to working directory root of this repository.
1368 1368
1369 1369 wdirvfs
1370 1370 ``vfs.vfs`` rooted at the working directory.
1371 1371
1372 1372 hgvfs
1373 1373 ``vfs.vfs`` rooted at .hg/
1374 1374
1375 1375 requirements
1376 1376 ``set`` of bytestrings representing repository opening requirements.
1377 1377
1378 1378 supportedrequirements
1379 1379 ``set`` of bytestrings representing repository requirements that we
1380 1380 know how to open. May be a superset of ``requirements``.
1381 1381
1382 1382 sharedpath
1383 1383 ``bytes`` defining the path to the storage base directory. Points to a
1384 1384 ``.hg/`` directory somewhere.
1385 1385
1386 1386 store
1387 1387 ``store.basicstore`` (or derived) instance providing access to
1388 1388 versioned storage.
1389 1389
1390 1390 cachevfs
1391 1391 ``vfs.vfs`` used for cache files.
1392 1392
1393 1393 wcachevfs
1394 1394 ``vfs.vfs`` used for cache files related to the working copy.
1395 1395
1396 1396 features
1397 1397 ``set`` of bytestrings defining features/capabilities of this
1398 1398 instance.
1399 1399
1400 1400 intents
1401 1401 ``set`` of system strings indicating what this repo will be used
1402 1402 for.
1403 1403 """
1404 1404 self.baseui = baseui
1405 1405 self.ui = ui
1406 1406 self.origroot = origroot
1407 1407 # vfs rooted at working directory.
1408 1408 self.wvfs = wdirvfs
1409 1409 self.root = wdirvfs.base
1410 1410 # vfs rooted at .hg/. Used to access most non-store paths.
1411 1411 self.vfs = hgvfs
1412 1412 self.path = hgvfs.base
1413 1413 self.requirements = requirements
1414 1414 self.nodeconstants = sha1nodeconstants
1415 1415 self.nullid = self.nodeconstants.nullid
1416 1416 self.supported = supportedrequirements
1417 1417 self.sharedpath = sharedpath
1418 1418 self.store = store
1419 1419 self.cachevfs = cachevfs
1420 1420 self.wcachevfs = wcachevfs
1421 1421 self.features = features
1422 1422
1423 1423 self.filtername = None
1424 1424
1425 1425 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1426 1426 b'devel', b'check-locks'
1427 1427 ):
1428 1428 self.vfs.audit = self._getvfsward(self.vfs.audit)
1429 1429 # A list of callbacks to shape the phase if no data were found.
1430 1430 # Callbacks are in the form: func(repo, roots) --> processed root.
1431 1431 # This list is to be filled by extensions during repo setup
1432 1432 self._phasedefaults = []
1433 1433
1434 1434 color.setup(self.ui)
1435 1435
1436 1436 self.spath = self.store.path
1437 1437 self.svfs = self.store.vfs
1438 1438 self.sjoin = self.store.join
1439 1439 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1440 1440 b'devel', b'check-locks'
1441 1441 ):
1442 1442 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1443 1443 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1444 1444 else: # standard vfs
1445 1445 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1446 1446
1447 1447 self._dirstatevalidatewarned = False
1448 1448
1449 1449 self._branchcaches = branchmap.BranchMapCache()
1450 1450 self._revbranchcache = None
1451 1451 self._filterpats = {}
1452 1452 self._datafilters = {}
1453 1453 self._transref = self._lockref = self._wlockref = None
1454 1454
1455 1455 # A cache for various files under .hg/ that tracks file changes,
1456 1456 # (used by the filecache decorator)
1457 1457 #
1458 1458 # Maps a property name to its util.filecacheentry
1459 1459 self._filecache = {}
1460 1460
1461 1461 # hold sets of revisions to be filtered
1462 1462 # should be cleared when something might have changed the filter value:
1463 1463 # - new changesets,
1464 1464 # - phase change,
1465 1465 # - new obsolescence marker,
1466 1466 # - working directory parent change,
1467 1467 # - bookmark changes
1468 1468 self.filteredrevcache = {}
1469 1469
1470 1470 self._dirstate = None
1471 1471 # post-dirstate-status hooks
1472 1472 self._postdsstatus = []
1473 1473
1474 1474 self._pending_narrow_pats = None
1475 1475 self._pending_narrow_pats_dirstate = None
1476 1476
1477 1477 # generic mapping between names and nodes
1478 1478 self.names = namespaces.namespaces()
1479 1479
1480 1480 # Key to signature value.
1481 1481 self._sparsesignaturecache = {}
1482 1482 # Signature to cached matcher instance.
1483 1483 self._sparsematchercache = {}
1484 1484
1485 1485 self._extrafilterid = repoview.extrafilter(ui)
1486 1486
1487 1487 self.filecopiesmode = None
1488 1488 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1489 1489 self.filecopiesmode = b'changeset-sidedata'
1490 1490
1491 1491 self._wanted_sidedata = set()
1492 1492 self._sidedata_computers = {}
1493 1493 sidedatamod.set_sidedata_spec_for_repo(self)
1494 1494
1495 1495 def _getvfsward(self, origfunc):
1496 1496 """build a ward for self.vfs"""
1497 1497 rref = weakref.ref(self)
1498 1498
1499 1499 def checkvfs(path, mode=None):
1500 1500 ret = origfunc(path, mode=mode)
1501 1501 repo = rref()
1502 1502 if (
1503 1503 repo is None
1504 1504 or not util.safehasattr(repo, b'_wlockref')
1505 1505 or not util.safehasattr(repo, b'_lockref')
1506 1506 ):
1507 1507 return
1508 1508 if mode in (None, b'r', b'rb'):
1509 1509 return
1510 1510 if path.startswith(repo.path):
1511 1511 # truncate name relative to the repository (.hg)
1512 1512 path = path[len(repo.path) + 1 :]
1513 1513 if path.startswith(b'cache/'):
1514 1514 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1515 1515 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1516 1516 # path prefixes covered by 'lock'
1517 1517 vfs_path_prefixes = (
1518 1518 b'journal.',
1519 1519 b'undo.',
1520 1520 b'strip-backup/',
1521 1521 b'cache/',
1522 1522 )
1523 1523 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1524 1524 if repo._currentlock(repo._lockref) is None:
1525 1525 repo.ui.develwarn(
1526 1526 b'write with no lock: "%s"' % path,
1527 1527 stacklevel=3,
1528 1528 config=b'check-locks',
1529 1529 )
1530 1530 elif repo._currentlock(repo._wlockref) is None:
1531 1531 # rest of vfs files are covered by 'wlock'
1532 1532 #
1533 1533 # exclude special files
1534 1534 for prefix in self._wlockfreeprefix:
1535 1535 if path.startswith(prefix):
1536 1536 return
1537 1537 repo.ui.develwarn(
1538 1538 b'write with no wlock: "%s"' % path,
1539 1539 stacklevel=3,
1540 1540 config=b'check-locks',
1541 1541 )
1542 1542 return ret
1543 1543
1544 1544 return checkvfs
1545 1545
1546 1546 def _getsvfsward(self, origfunc):
1547 1547 """build a ward for self.svfs"""
1548 1548 rref = weakref.ref(self)
1549 1549
1550 1550 def checksvfs(path, mode=None):
1551 1551 ret = origfunc(path, mode=mode)
1552 1552 repo = rref()
1553 1553 if repo is None or not util.safehasattr(repo, b'_lockref'):
1554 1554 return
1555 1555 if mode in (None, b'r', b'rb'):
1556 1556 return
1557 1557 if path.startswith(repo.sharedpath):
1558 1558 # truncate name relative to the repository (.hg)
1559 1559 path = path[len(repo.sharedpath) + 1 :]
1560 1560 if repo._currentlock(repo._lockref) is None:
1561 1561 repo.ui.develwarn(
1562 1562 b'write with no lock: "%s"' % path, stacklevel=4
1563 1563 )
1564 1564 return ret
1565 1565
1566 1566 return checksvfs
1567 1567
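# The vfs_map property below gives the transaction machinery a single,
# shared definition of the location-key -> vfs mapping: the empty key and
# b'store' both resolve to the store vfs, while b'plain' resolves to the
# .hg/ vfs, instead of each call site rebuilding the dict inline.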
1568 @property
1569 def vfs_map(self):
1570 return {
1571 b'': self.svfs,
1572 b'plain': self.vfs,
1573 b'store': self.svfs,
1574 }
1575
1568 1576 def close(self):
1569 1577 self._writecaches()
1570 1578
1571 1579 def _writecaches(self):
1572 1580 if self._revbranchcache:
1573 1581 self._revbranchcache.write()
1574 1582
1575 1583 def _restrictcapabilities(self, caps):
1576 1584 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1577 1585 caps = set(caps)
1578 1586 capsblob = bundle2.encodecaps(
1579 1587 bundle2.getrepocaps(self, role=b'client')
1580 1588 )
1581 1589 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1582 1590 if self.ui.configbool(b'experimental', b'narrow'):
1583 1591 caps.add(wireprototypes.NARROWCAP)
1584 1592 return caps
1585 1593
1586 1594 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
1587 1595 # self -> auditor -> self._checknested -> self
1588 1596
1589 1597 @property
1590 1598 def auditor(self):
1591 1599 # This is only used by context.workingctx.match in order to
1592 1600 # detect files in subrepos.
1593 1601 return pathutil.pathauditor(self.root, callback=self._checknested)
1594 1602
1595 1603 @property
1596 1604 def nofsauditor(self):
1597 1605 # This is only used by context.basectx.match in order to detect
1598 1606 # files in subrepos.
1599 1607 return pathutil.pathauditor(
1600 1608 self.root, callback=self._checknested, realfs=False, cached=True
1601 1609 )
1602 1610
1603 1611 def _checknested(self, path):
1604 1612 """Determine if path is a legal nested repository."""
1605 1613 if not path.startswith(self.root):
1606 1614 return False
1607 1615 subpath = path[len(self.root) + 1 :]
1608 1616 normsubpath = util.pconvert(subpath)
1609 1617
1610 1618 # XXX: Checking against the current working copy is wrong in
1611 1619 # the sense that it can reject things like
1612 1620 #
1613 1621 # $ hg cat -r 10 sub/x.txt
1614 1622 #
1615 1623 # if sub/ is no longer a subrepository in the working copy
1616 1624 # parent revision.
1617 1625 #
1618 1626 # However, it can of course also allow things that would have
1619 1627 # been rejected before, such as the above cat command if sub/
1620 1628 # is a subrepository now, but was a normal directory before.
1621 1629 # The old path auditor would have rejected by mistake since it
1622 1630 # panics when it sees sub/.hg/.
1623 1631 #
1624 1632 # All in all, checking against the working copy seems sensible
1625 1633 # since we want to prevent access to nested repositories on
1626 1634 # the filesystem *now*.
1627 1635 ctx = self[None]
1628 1636 parts = util.splitpath(subpath)
1629 1637 while parts:
1630 1638 prefix = b'/'.join(parts)
1631 1639 if prefix in ctx.substate:
1632 1640 if prefix == normsubpath:
1633 1641 return True
1634 1642 else:
1635 1643 sub = ctx.sub(prefix)
1636 1644 return sub.checknested(subpath[len(prefix) + 1 :])
1637 1645 else:
1638 1646 parts.pop()
1639 1647 return False
1640 1648
1641 1649 def peer(self, path=None):
1642 1650 return localpeer(self, path=path) # not cached to avoid reference cycle
1643 1651
1644 1652 def unfiltered(self):
1645 1653 """Return unfiltered version of the repository
1646 1654
1647 1655 Intended to be overwritten by filtered repo."""
1648 1656 return self
1649 1657
1650 1658 def filtered(self, name, visibilityexceptions=None):
1651 1659 """Return a filtered version of a repository
1652 1660
1653 1661 The `name` parameter is the identifier of the requested view. This
1654 1662 will return a repoview object set "exactly" to the specified view.
1655 1663
1656 1664 This function does not apply recursive filtering to a repository. For
1657 1665 example calling `repo.filtered("served")` will return a repoview using
1658 1666 the "served" view, regardless of the initial view used by `repo`.
1659 1667
1660 1668 In other words, there is always only one level of `repoview` "filtering".
1661 1669 """
1662 1670 if self._extrafilterid is not None and b'%' not in name:
1663 1671 name = name + b'%' + self._extrafilterid
1664 1672
1665 1673 cls = repoview.newtype(self.unfiltered().__class__)
1666 1674 return cls(self, name, visibilityexceptions)
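# Illustrative consequence of the non-recursive filtering described above:
# something like repo.filtered(b'visible').filtered(b'served') ends up
# equivalent to calling repo.filtered(b'served') on the unfiltered repo,
# since each call rebuilds the view type from unfiltered().__class__.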
1667 1675
1668 1676 @mixedrepostorecache(
1669 1677 (b'bookmarks', b'plain'),
1670 1678 (b'bookmarks.current', b'plain'),
1671 1679 (b'bookmarks', b''),
1672 1680 (b'00changelog.i', b''),
1673 1681 )
1674 1682 def _bookmarks(self):
1675 1683 # Since the multiple files involved in the transaction cannot be
1676 1684 # written atomically (with current repository format), there is a race
1677 1685 # condition here.
1678 1686 #
1679 1687 # 1) changelog content A is read
1680 1688 # 2) outside transaction update changelog to content B
1681 1689 # 3) outside transaction update bookmark file referring to content B
1682 1690 # 4) bookmarks file content is read and filtered against changelog-A
1683 1691 #
1684 1692 # When this happens, bookmarks against nodes missing from A are dropped.
1685 1693 #
1686 1694 # Having this happen during a read is not great, but it becomes worse
1687 1695 # when it happens during a write because the bookmarks to the "unknown"
1688 1696 # nodes will be dropped for good. However, writes happen within locks.
1689 1697 # This locking makes it possible to have a race-free consistent read.
1690 1698 # For this purpose, data read from disk before locking are
1691 1699 # "invalidated" right after the locks are taken. These invalidations are
1692 1700 # "light": the `filecache` mechanism keeps the data in memory and will
1693 1701 # reuse it if the underlying files did not change. Not parsing the
1694 1702 # same data multiple times helps performance.
1695 1703 #
1696 1704 # Unfortunately, in the case described above, the files tracked by the
1697 1705 # bookmarks file cache might not have changed, but the in-memory
1698 1706 # content is still "wrong" because we used an older changelog content
1699 1707 # to process the on-disk data. So after locking, the changelog would be
1700 1708 # refreshed but `_bookmarks` would be preserved.
1701 1709 # Adding `00changelog.i` to the list of tracked files is not
1702 1710 # enough, because at the time we build the content for `_bookmarks` in
1703 1711 # (4), the changelog file has already diverged from the content used
1704 1712 # for loading `changelog` in (1)
1705 1713 #
1706 1714 # To prevent the issue, we force the changelog to be explicitly
1707 1715 # reloaded while computing `_bookmarks`. The data race can still happen
1708 1716 # without the lock (with a narrower window), but it would no longer go
1709 1717 # undetected during the lock time refresh.
1710 1718 #
1711 1719 # The new schedule is as follows
1712 1720 #
1713 1721 # 1) filecache logic detect that `_bookmarks` needs to be computed
1714 1722 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1715 1723 # 3) We force `changelog` filecache to be tested
1716 1724 # 4) cachestat for `changelog` are captured (for changelog)
1717 1725 # 5) `_bookmarks` is computed and cached
1718 1726 #
1719 1727 # The step in (3) ensures we have a changelog at least as recent as the
1720 1728 # cache stat computed in (1). As a result, at locking time:
1721 1729 # * if the changelog did not change since (1) -> we can reuse the data
1722 1730 # * otherwise -> the bookmarks get refreshed.
1723 1731 self._refreshchangelog()
1724 1732 return bookmarks.bmstore(self)
1725 1733
1726 1734 def _refreshchangelog(self):
1727 1735 """make sure the in memory changelog match the on-disk one"""
1728 1736 if 'changelog' in vars(self) and self.currenttransaction() is None:
1729 1737 del self.changelog
1730 1738
1731 1739 @property
1732 1740 def _activebookmark(self):
1733 1741 return self._bookmarks.active
1734 1742
1735 1743 # _phasesets depend on changelog. What we need is to call
1736 1744 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1737 1745 # can't be easily expressed in the filecache mechanism.
1738 1746 @storecache(b'phaseroots', b'00changelog.i')
1739 1747 def _phasecache(self):
1740 1748 return phases.phasecache(self, self._phasedefaults)
1741 1749
1742 1750 @storecache(b'obsstore')
1743 1751 def obsstore(self):
1744 1752 return obsolete.makestore(self.ui, self)
1745 1753
1746 1754 @changelogcache()
1747 1755 def changelog(repo):
1748 1756 # load dirstate before changelog to avoid a race, see issue6303
1749 1757 repo.dirstate.prefetch_parents()
1750 1758 return repo.store.changelog(
1751 1759 txnutil.mayhavepending(repo.root),
1752 1760 concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
1753 1761 )
1754 1762
1755 1763 @manifestlogcache()
1756 1764 def manifestlog(self):
1757 1765 return self.store.manifestlog(self, self._storenarrowmatch)
1758 1766
1759 1767 @unfilteredpropertycache
1760 1768 def dirstate(self):
1761 1769 if self._dirstate is None:
1762 1770 self._dirstate = self._makedirstate()
1763 1771 else:
1764 1772 self._dirstate.refresh()
1765 1773 return self._dirstate
1766 1774
1767 1775 def _makedirstate(self):
1768 1776 """Extension point for wrapping the dirstate per-repo."""
1769 1777 sparsematchfn = None
1770 1778 if sparse.use_sparse(self):
1771 1779 sparsematchfn = lambda: sparse.matcher(self)
1772 1780 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1773 1781 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1774 1782 use_dirstate_v2 = v2_req in self.requirements
1775 1783 use_tracked_hint = th in self.requirements
1776 1784
1777 1785 return dirstate.dirstate(
1778 1786 self.vfs,
1779 1787 self.ui,
1780 1788 self.root,
1781 1789 self._dirstatevalidate,
1782 1790 sparsematchfn,
1783 1791 self.nodeconstants,
1784 1792 use_dirstate_v2,
1785 1793 use_tracked_hint=use_tracked_hint,
1786 1794 )
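# A hypothetical extension wrapping the extension point above (names below
# are placeholders, not part of this module) would typically subclass the
# repo class in place from its reposetup:
#
#     class myrepo(repo.__class__):
#         def _makedirstate(self):
#             ds = super()._makedirstate()
#             # decorate or observe ds here
#             return ds
#
#     repo.__class__ = myrepo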
1787 1795
1788 1796 def _dirstatevalidate(self, node):
1789 1797 try:
1790 1798 self.changelog.rev(node)
1791 1799 return node
1792 1800 except error.LookupError:
1793 1801 if not self._dirstatevalidatewarned:
1794 1802 self._dirstatevalidatewarned = True
1795 1803 self.ui.warn(
1796 1804 _(b"warning: ignoring unknown working parent %s!\n")
1797 1805 % short(node)
1798 1806 )
1799 1807 return self.nullid
1800 1808
1801 1809 @storecache(narrowspec.FILENAME)
1802 1810 def narrowpats(self):
1803 1811 """matcher patterns for this repository's narrowspec
1804 1812
1805 1813 A tuple of (includes, excludes).
1806 1814 """
1807 1815 # the narrow management should probably move into its own object
1808 1816 val = self._pending_narrow_pats
1809 1817 if val is None:
1810 1818 val = narrowspec.load(self)
1811 1819 return val
1812 1820
1813 1821 @storecache(narrowspec.FILENAME)
1814 1822 def _storenarrowmatch(self):
1815 1823 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1816 1824 return matchmod.always()
1817 1825 include, exclude = self.narrowpats
1818 1826 return narrowspec.match(self.root, include=include, exclude=exclude)
1819 1827
1820 1828 @storecache(narrowspec.FILENAME)
1821 1829 def _narrowmatch(self):
1822 1830 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1823 1831 return matchmod.always()
1824 1832 narrowspec.checkworkingcopynarrowspec(self)
1825 1833 include, exclude = self.narrowpats
1826 1834 return narrowspec.match(self.root, include=include, exclude=exclude)
1827 1835
1828 1836 def narrowmatch(self, match=None, includeexact=False):
1829 1837 """matcher corresponding the the repo's narrowspec
1830 1838
1831 1839 If `match` is given, then that will be intersected with the narrow
1832 1840 matcher.
1833 1841
1834 1842 If `includeexact` is True, then any exact matches from `match` will
1835 1843 be included even if they're outside the narrowspec.
1836 1844 """
1837 1845 if match:
1838 1846 if includeexact and not self._narrowmatch.always():
1839 1847 # do not exclude explicitly-specified paths so that they can
1840 1848 # be warned later on
1841 1849 em = matchmod.exact(match.files())
1842 1850 nm = matchmod.unionmatcher([self._narrowmatch, em])
1843 1851 return matchmod.intersectmatchers(match, nm)
1844 1852 return matchmod.intersectmatchers(match, self._narrowmatch)
1845 1853 return self._narrowmatch
1846 1854
1847 1855 def setnarrowpats(self, newincludes, newexcludes):
1848 1856 narrowspec.save(self, newincludes, newexcludes)
1849 1857 self.invalidate(clearfilecache=True)
1850 1858
1851 1859 @unfilteredpropertycache
1852 1860 def _quick_access_changeid_null(self):
1853 1861 return {
1854 1862 b'null': (nullrev, self.nodeconstants.nullid),
1855 1863 nullrev: (nullrev, self.nodeconstants.nullid),
1856 1864 self.nullid: (nullrev, self.nullid),
1857 1865 }
1858 1866
1859 1867 @unfilteredpropertycache
1860 1868 def _quick_access_changeid_wc(self):
1861 1869 # also fast path access to the working copy parents
1862 1870 # however, only do it for filters that ensure the wc is visible.
1863 1871 quick = self._quick_access_changeid_null.copy()
1864 1872 cl = self.unfiltered().changelog
1865 1873 for node in self.dirstate.parents():
1866 1874 if node == self.nullid:
1867 1875 continue
1868 1876 rev = cl.index.get_rev(node)
1869 1877 if rev is None:
1870 1878 # unknown working copy parent case:
1871 1879 #
1872 1880 # skip the fast path and let higher code deal with it
1873 1881 continue
1874 1882 pair = (rev, node)
1875 1883 quick[rev] = pair
1876 1884 quick[node] = pair
1877 1885 # also add the parents of the parents
1878 1886 for r in cl.parentrevs(rev):
1879 1887 if r == nullrev:
1880 1888 continue
1881 1889 n = cl.node(r)
1882 1890 pair = (r, n)
1883 1891 quick[r] = pair
1884 1892 quick[n] = pair
1885 1893 p1node = self.dirstate.p1()
1886 1894 if p1node != self.nullid:
1887 1895 quick[b'.'] = quick[p1node]
1888 1896 return quick
1889 1897
1890 1898 @unfilteredmethod
1891 1899 def _quick_access_changeid_invalidate(self):
1892 1900 if '_quick_access_changeid_wc' in vars(self):
1893 1901 del self.__dict__['_quick_access_changeid_wc']
1894 1902
1895 1903 @property
1896 1904 def _quick_access_changeid(self):
1897 1905 """an helper dictionnary for __getitem__ calls
1898 1906
1899 1907 This contains a list of symbol we can recognise right away without
1900 1908 further processing.
1901 1909 """
1902 1910 if self.filtername in repoview.filter_has_wc:
1903 1911 return self._quick_access_changeid_wc
1904 1912 return self._quick_access_changeid_null
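# In effect, lookups such as repo[b'.'] or repo[nullrev] can be answered by
# a plain dict hit in __getitem__ below; the working-copy variant is only
# used for filters known to keep the working copy parents visible
# (repoview.filter_has_wc).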
1905 1913
1906 1914 def __getitem__(self, changeid):
1907 1915 # dealing with special cases
1908 1916 if changeid is None:
1909 1917 return context.workingctx(self)
1910 1918 if isinstance(changeid, context.basectx):
1911 1919 return changeid
1912 1920
1913 1921 # dealing with multiple revisions
1914 1922 if isinstance(changeid, slice):
1915 1923 # wdirrev isn't contiguous so the slice shouldn't include it
1916 1924 return [
1917 1925 self[i]
1918 1926 for i in range(*changeid.indices(len(self)))
1919 1927 if i not in self.changelog.filteredrevs
1920 1928 ]
1921 1929
1922 1930 # dealing with some special values
1923 1931 quick_access = self._quick_access_changeid.get(changeid)
1924 1932 if quick_access is not None:
1925 1933 rev, node = quick_access
1926 1934 return context.changectx(self, rev, node, maybe_filtered=False)
1927 1935 if changeid == b'tip':
1928 1936 node = self.changelog.tip()
1929 1937 rev = self.changelog.rev(node)
1930 1938 return context.changectx(self, rev, node)
1931 1939
1932 1940 # dealing with arbitrary values
1933 1941 try:
1934 1942 if isinstance(changeid, int):
1935 1943 node = self.changelog.node(changeid)
1936 1944 rev = changeid
1937 1945 elif changeid == b'.':
1938 1946 # this is a hack to delay/avoid loading obsmarkers
1939 1947 # when we know that '.' won't be hidden
1940 1948 node = self.dirstate.p1()
1941 1949 rev = self.unfiltered().changelog.rev(node)
1942 1950 elif len(changeid) == self.nodeconstants.nodelen:
1943 1951 try:
1944 1952 node = changeid
1945 1953 rev = self.changelog.rev(changeid)
1946 1954 except error.FilteredLookupError:
1947 1955 changeid = hex(changeid) # for the error message
1948 1956 raise
1949 1957 except LookupError:
1950 1958 # check if it might have come from damaged dirstate
1951 1959 #
1952 1960 # XXX we could avoid the unfiltered if we had a recognizable
1953 1961 # exception for filtered changeset access
1954 1962 if (
1955 1963 self.local()
1956 1964 and changeid in self.unfiltered().dirstate.parents()
1957 1965 ):
1958 1966 msg = _(b"working directory has unknown parent '%s'!")
1959 1967 raise error.Abort(msg % short(changeid))
1960 1968 changeid = hex(changeid) # for the error message
1961 1969 raise
1962 1970
1963 1971 elif len(changeid) == 2 * self.nodeconstants.nodelen:
1964 1972 node = bin(changeid)
1965 1973 rev = self.changelog.rev(node)
1966 1974 else:
1967 1975 raise error.ProgrammingError(
1968 1976 b"unsupported changeid '%s' of type %s"
1969 1977 % (changeid, pycompat.bytestr(type(changeid)))
1970 1978 )
1971 1979
1972 1980 return context.changectx(self, rev, node)
1973 1981
1974 1982 except (error.FilteredIndexError, error.FilteredLookupError):
1975 1983 raise error.FilteredRepoLookupError(
1976 1984 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1977 1985 )
1978 1986 except (IndexError, LookupError):
1979 1987 raise error.RepoLookupError(
1980 1988 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1981 1989 )
1982 1990 except error.WdirUnsupported:
1983 1991 return context.workingctx(self)
1984 1992
1985 1993 def __contains__(self, changeid):
1986 1994 """True if the given changeid exists"""
1987 1995 try:
1988 1996 self[changeid]
1989 1997 return True
1990 1998 except error.RepoLookupError:
1991 1999 return False
1992 2000
1993 2001 def __nonzero__(self):
1994 2002 return True
1995 2003
1996 2004 __bool__ = __nonzero__
1997 2005
1998 2006 def __len__(self):
1999 2007 # no need to pay the cost of repoview.changelog
2000 2008 unfi = self.unfiltered()
2001 2009 return len(unfi.changelog)
2002 2010
2003 2011 def __iter__(self):
2004 2012 return iter(self.changelog)
2005 2013
2006 2014 def revs(self, expr: bytes, *args):
2007 2015 """Find revisions matching a revset.
2008 2016
2009 2017 The revset is specified as a string ``expr`` that may contain
2010 2018 %-formatting to escape certain types. See ``revsetlang.formatspec``.
2011 2019
2012 2020 Revset aliases from the configuration are not expanded. To expand
2013 2021 user aliases, consider calling ``scmutil.revrange()`` or
2014 2022 ``repo.anyrevs([expr], user=True)``.
2015 2023
2016 2024 Returns a smartset.abstractsmartset, which is a list-like interface
2017 2025 that contains integer revisions.
2018 2026 """
2019 2027 tree = revsetlang.spectree(expr, *args)
2020 2028 return revset.makematcher(tree)(self)
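# Illustrative call (a sketch; ``node`` and ``revs_list`` are hypothetical
# values, and the %-escapes follow ``revsetlang.formatspec``):
#
#     repo.revs(b'ancestors(%n) and not %ld', node, revs_list)
#
# which yields a smartset of integer revisions as described above.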
2021 2029
2022 2030 def set(self, expr: bytes, *args):
2023 2031 """Find revisions matching a revset and emit changectx instances.
2024 2032
2025 2033 This is a convenience wrapper around ``revs()`` that iterates the
2026 2034 result and is a generator of changectx instances.
2027 2035
2028 2036 Revset aliases from the configuration are not expanded. To expand
2029 2037 user aliases, consider calling ``scmutil.revrange()``.
2030 2038 """
2031 2039 for r in self.revs(expr, *args):
2032 2040 yield self[r]
2033 2041
2034 2042 def anyrevs(self, specs: bytes, user=False, localalias=None):
2035 2043 """Find revisions matching one of the given revsets.
2036 2044
2037 2045 Revset aliases from the configuration are not expanded by default. To
2038 2046 expand user aliases, specify ``user=True``. To provide some local
2039 2047 definitions overriding user aliases, set ``localalias`` to
2040 2048 ``{name: definitionstring}``.
2041 2049 """
2042 2050 if specs == [b'null']:
2043 2051 return revset.baseset([nullrev])
2044 2052 if specs == [b'.']:
2045 2053 quick_data = self._quick_access_changeid.get(b'.')
2046 2054 if quick_data is not None:
2047 2055 return revset.baseset([quick_data[0]])
2048 2056 if user:
2049 2057 m = revset.matchany(
2050 2058 self.ui,
2051 2059 specs,
2052 2060 lookup=revset.lookupfn(self),
2053 2061 localalias=localalias,
2054 2062 )
2055 2063 else:
2056 2064 m = revset.matchany(None, specs, localalias=localalias)
2057 2065 return m(self)
2058 2066
2059 2067 def url(self) -> bytes:
2060 2068 return b'file:' + self.root
2061 2069
2062 2070 def hook(self, name, throw=False, **args):
2063 2071 """Call a hook, passing this repo instance.
2064 2072
2065 2073 This is a convenience method to aid invoking hooks. Extensions likely
2066 2074 won't call this unless they have registered a custom hook or are
2067 2075 replacing code that is expected to call a hook.
2068 2076 """
2069 2077 return hook.hook(self.ui, self, name, throw, **args)
2070 2078
2071 2079 @filteredpropertycache
2072 2080 def _tagscache(self):
2073 2081 """Returns a tagscache object that contains various tags related
2074 2082 caches."""
2075 2083
2076 2084 # This simplifies its cache management by having one decorated
2077 2085 # function (this one) and the rest simply fetch things from it.
2078 2086 class tagscache:
2079 2087 def __init__(self):
2080 2088 # These two define the set of tags for this repository. tags
2081 2089 # maps tag name to node; tagtypes maps tag name to 'global' or
2082 2090 # 'local'. (Global tags are defined by .hgtags across all
2083 2091 # heads, and local tags are defined in .hg/localtags.)
2084 2092 # They constitute the in-memory cache of tags.
2085 2093 self.tags = self.tagtypes = None
2086 2094
2087 2095 self.nodetagscache = self.tagslist = None
2088 2096
2089 2097 cache = tagscache()
2090 2098 cache.tags, cache.tagtypes = self._findtags()
2091 2099
2092 2100 return cache
2093 2101
2094 2102 def tags(self):
2095 2103 '''return a mapping of tag to node'''
2096 2104 t = {}
2097 2105 if self.changelog.filteredrevs:
2098 2106 tags, tt = self._findtags()
2099 2107 else:
2100 2108 tags = self._tagscache.tags
2101 2109 rev = self.changelog.rev
2102 2110 for k, v in tags.items():
2103 2111 try:
2104 2112 # ignore tags to unknown nodes
2105 2113 rev(v)
2106 2114 t[k] = v
2107 2115 except (error.LookupError, ValueError):
2108 2116 pass
2109 2117 return t
2110 2118
2111 2119 def _findtags(self):
2112 2120 """Do the hard work of finding tags. Return a pair of dicts
2113 2121 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2114 2122 maps tag name to a string like \'global\' or \'local\'.
2115 2123 Subclasses or extensions are free to add their own tags, but
2116 2124 should be aware that the returned dicts will be retained for the
2117 2125 duration of the localrepo object."""
2118 2126
2119 2127 # XXX what tagtype should subclasses/extensions use? Currently
2120 2128 # mq and bookmarks add tags, but do not set the tagtype at all.
2121 2129 # Should each extension invent its own tag type? Should there
2122 2130 # be one tagtype for all such "virtual" tags? Or is the status
2123 2131 # quo fine?
2124 2132
2125 2133 # map tag name to (node, hist)
2126 2134 alltags = tagsmod.findglobaltags(self.ui, self)
2127 2135 # map tag name to tag type
2128 2136 tagtypes = {tag: b'global' for tag in alltags}
2129 2137
2130 2138 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2131 2139
2132 2140 # Build the return dicts. Have to re-encode tag names because
2133 2141 # the tags module always uses UTF-8 (in order not to lose info
2134 2142 # writing to the cache), but the rest of Mercurial wants them in
2135 2143 # local encoding.
2136 2144 tags = {}
2137 2145 for name, (node, hist) in alltags.items():
2138 2146 if node != self.nullid:
2139 2147 tags[encoding.tolocal(name)] = node
2140 2148 tags[b'tip'] = self.changelog.tip()
2141 2149 tagtypes = {
2142 2150 encoding.tolocal(name): value for (name, value) in tagtypes.items()
2143 2151 }
2144 2152 return (tags, tagtypes)
2145 2153
2146 2154 def tagtype(self, tagname):
2147 2155 """
2148 2156 return the type of the given tag. result can be:
2149 2157
2150 2158 'local' : a local tag
2151 2159 'global' : a global tag
2152 2160 None : tag does not exist
2153 2161 """
2154 2162
2155 2163 return self._tagscache.tagtypes.get(tagname)
2156 2164
2157 2165 def tagslist(self):
2158 2166 '''return a list of tags ordered by revision'''
2159 2167 if not self._tagscache.tagslist:
2160 2168 l = []
2161 2169 for t, n in self.tags().items():
2162 2170 l.append((self.changelog.rev(n), t, n))
2163 2171 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2164 2172
2165 2173 return self._tagscache.tagslist
2166 2174
2167 2175 def nodetags(self, node):
2168 2176 '''return the tags associated with a node'''
2169 2177 if not self._tagscache.nodetagscache:
2170 2178 nodetagscache = {}
2171 2179 for t, n in self._tagscache.tags.items():
2172 2180 nodetagscache.setdefault(n, []).append(t)
2173 2181 for tags in nodetagscache.values():
2174 2182 tags.sort()
2175 2183 self._tagscache.nodetagscache = nodetagscache
2176 2184 return self._tagscache.nodetagscache.get(node, [])
2177 2185
2178 2186 def nodebookmarks(self, node):
2179 2187 """return the list of bookmarks pointing to the specified node"""
2180 2188 return self._bookmarks.names(node)
2181 2189
2182 2190 def branchmap(self):
2183 2191 """returns a dictionary {branch: [branchheads]} with branchheads
2184 2192 ordered by increasing revision number"""
2185 2193 return self._branchcaches[self]
2186 2194
2187 2195 @unfilteredmethod
2188 2196 def revbranchcache(self):
2189 2197 if not self._revbranchcache:
2190 2198 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2191 2199 return self._revbranchcache
2192 2200
2193 2201 def register_changeset(self, rev, changelogrevision):
2194 2202 self.revbranchcache().setdata(rev, changelogrevision)
2195 2203
2196 2204 def branchtip(self, branch, ignoremissing=False):
2197 2205 """return the tip node for a given branch
2198 2206
2199 2207 If ignoremissing is True, then this method will not raise an error.
2200 2208 This is helpful for callers that only expect None for a missing branch
2201 2209 (e.g. namespace).
2202 2210
2203 2211 """
2204 2212 try:
2205 2213 return self.branchmap().branchtip(branch)
2206 2214 except KeyError:
2207 2215 if not ignoremissing:
2208 2216 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2209 2217 else:
2210 2218 pass
2211 2219
2212 2220 def lookup(self, key):
2213 2221 node = scmutil.revsymbol(self, key).node()
2214 2222 if node is None:
2215 2223 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2216 2224 return node
2217 2225
2218 2226 def lookupbranch(self, key):
2219 2227 if self.branchmap().hasbranch(key):
2220 2228 return key
2221 2229
2222 2230 return scmutil.revsymbol(self, key).branch()
2223 2231
2224 2232 def known(self, nodes):
2225 2233 cl = self.changelog
2226 2234 get_rev = cl.index.get_rev
2227 2235 filtered = cl.filteredrevs
2228 2236 result = []
2229 2237 for n in nodes:
2230 2238 r = get_rev(n)
2231 2239 resp = not (r is None or r in filtered)
2232 2240 result.append(resp)
2233 2241 return result
2234 2242
2235 2243 def local(self):
2236 2244 return self
2237 2245
2238 2246 def publishing(self):
2239 2247 # it's safe (and desirable) to trust the publish flag unconditionally
2240 2248 # so that we don't finalize changes shared between users via ssh or nfs
2241 2249 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2242 2250
2243 2251 def cancopy(self):
2244 2252 # so statichttprepo's override of local() works
2245 2253 if not self.local():
2246 2254 return False
2247 2255 if not self.publishing():
2248 2256 return True
2249 2257 # if publishing we can't copy if there is filtered content
2250 2258 return not self.filtered(b'visible').changelog.filteredrevs
2251 2259
2252 2260 def shared(self):
2253 2261 '''the type of shared repository (None if not shared)'''
2254 2262 if self.sharedpath != self.path:
2255 2263 return b'store'
2256 2264 return None
2257 2265
2258 2266 def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
2259 2267 return self.vfs.reljoin(self.root, f, *insidef)
2260 2268
2261 2269 def setparents(self, p1, p2=None):
2262 2270 if p2 is None:
2263 2271 p2 = self.nullid
2264 2272 self[None].setparents(p1, p2)
2265 2273 self._quick_access_changeid_invalidate()
2266 2274
2267 2275 def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
2268 2276 """changeid must be a changeset revision, if specified.
2269 2277 fileid can be a file revision or node."""
2270 2278 return context.filectx(
2271 2279 self, path, changeid, fileid, changectx=changectx
2272 2280 )
2273 2281
2274 2282 def getcwd(self) -> bytes:
2275 2283 return self.dirstate.getcwd()
2276 2284
2277 2285 def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
2278 2286 return self.dirstate.pathto(f, cwd)
2279 2287
2280 2288 def _loadfilter(self, filter):
2281 2289 if filter not in self._filterpats:
2282 2290 l = []
2283 2291 for pat, cmd in self.ui.configitems(filter):
2284 2292 if cmd == b'!':
2285 2293 continue
2286 2294 mf = matchmod.match(self.root, b'', [pat])
2287 2295 fn = None
2288 2296 params = cmd
2289 2297 for name, filterfn in self._datafilters.items():
2290 2298 if cmd.startswith(name):
2291 2299 fn = filterfn
2292 2300 params = cmd[len(name) :].lstrip()
2293 2301 break
2294 2302 if not fn:
2295 2303 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2296 2304 fn.__name__ = 'commandfilter'
2297 2305 # Wrap old filters not supporting keyword arguments
2298 2306 if not pycompat.getargspec(fn)[2]:
2299 2307 oldfn = fn
2300 2308 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2301 2309 fn.__name__ = 'compat-' + oldfn.__name__
2302 2310 l.append((mf, fn, params))
2303 2311 self._filterpats[filter] = l
2304 2312 return self._filterpats[filter]
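# The patterns above come from the hgrc section named after ``filter``
# (e.g. "[encode]" or "[decode]"), where each entry maps a file pattern to a
# command or to a filter registered via adddatafilter(). A hypothetical
# example entry:
#
#     [encode]
#     *.txt = normalize-whitespace
#
# Unless "normalize-whitespace" matches a registered data filter name, the
# content is piped through it as a shell command via procutil.filter().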
2305 2313
2306 2314 def _filter(self, filterpats, filename, data):
2307 2315 for mf, fn, cmd in filterpats:
2308 2316 if mf(filename):
2309 2317 self.ui.debug(
2310 2318 b"filtering %s through %s\n"
2311 2319 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2312 2320 )
2313 2321 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2314 2322 break
2315 2323
2316 2324 return data
2317 2325
2318 2326 @unfilteredpropertycache
2319 2327 def _encodefilterpats(self):
2320 2328 return self._loadfilter(b'encode')
2321 2329
2322 2330 @unfilteredpropertycache
2323 2331 def _decodefilterpats(self):
2324 2332 return self._loadfilter(b'decode')
2325 2333
2326 2334 def adddatafilter(self, name, filter):
2327 2335 self._datafilters[name] = filter
2328 2336
2329 2337 def wread(self, filename: bytes) -> bytes:
2330 2338 if self.wvfs.islink(filename):
2331 2339 data = self.wvfs.readlink(filename)
2332 2340 else:
2333 2341 data = self.wvfs.read(filename)
2334 2342 return self._filter(self._encodefilterpats, filename, data)
2335 2343
2336 2344 def wwrite(
2337 2345 self,
2338 2346 filename: bytes,
2339 2347 data: bytes,
2340 2348 flags: bytes,
2341 2349 backgroundclose=False,
2342 2350 **kwargs
2343 2351 ) -> int:
2344 2352 """write ``data`` into ``filename`` in the working directory
2345 2353
2346 2354 This returns the length of the written (maybe decoded) data.
2347 2355 """
2348 2356 data = self._filter(self._decodefilterpats, filename, data)
2349 2357 if b'l' in flags:
2350 2358 self.wvfs.symlink(data, filename)
2351 2359 else:
2352 2360 self.wvfs.write(
2353 2361 filename, data, backgroundclose=backgroundclose, **kwargs
2354 2362 )
2355 2363 if b'x' in flags:
2356 2364 self.wvfs.setflags(filename, False, True)
2357 2365 else:
2358 2366 self.wvfs.setflags(filename, False, False)
2359 2367 return len(data)
2360 2368
2361 2369 def wwritedata(self, filename: bytes, data: bytes) -> bytes:
2362 2370 return self._filter(self._decodefilterpats, filename, data)
2363 2371
2364 2372 def currenttransaction(self):
2365 2373 """return the current transaction or None if non exists"""
2366 2374 if self._transref:
2367 2375 tr = self._transref()
2368 2376 else:
2369 2377 tr = None
2370 2378
2371 2379 if tr and tr.running():
2372 2380 return tr
2373 2381 return None
2374 2382
2375 2383 def transaction(self, desc, report=None):
2376 2384 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2377 2385 b'devel', b'check-locks'
2378 2386 ):
2379 2387 if self._currentlock(self._lockref) is None:
2380 2388 raise error.ProgrammingError(b'transaction requires locking')
2381 2389 tr = self.currenttransaction()
2382 2390 if tr is not None:
2383 2391 return tr.nest(name=desc)
2384 2392
2385 2393 # abort here if the journal already exists
2386 2394 if self.svfs.exists(b"journal"):
2387 2395 raise error.RepoError(
2388 2396 _(b"abandoned transaction found"),
2389 2397 hint=_(b"run 'hg recover' to clean up transaction"),
2390 2398 )
2391 2399
2392 2400 # At that point your dirstate should be clean:
2393 2401 #
2394 2402 # - If you don't have the wlock, why would you still have a dirty
2395 2403 # dirstate?
2396 2404 #
2397 2405 # - If you hold the wlock, you should not be opening a transaction in
2398 2406 # the middle of a `dirstate.changing_*` block. The transaction needs to
2399 2407 # be open before that and wrap the change-context.
2400 2408 #
2401 2409 # - If you are not within a `dirstate.changing_*` context, why is our
2402 2410 # dirstate dirty?
2403 2411 if self.dirstate._dirty:
2404 2412 m = "cannot open a transaction with a dirty dirstate"
2405 2413 raise error.ProgrammingError(m)
2406 2414
2407 2415 idbase = b"%.40f#%f" % (random.random(), time.time())
2408 2416 ha = hex(hashutil.sha1(idbase).digest())
2409 2417 txnid = b'TXN:' + ha
2410 2418 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2411 2419
2412 2420 self._writejournal(desc)
2413 2421 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2414 2422 if report:
2415 2423 rp = report
2416 2424 else:
2417 2425 rp = self.ui.warn
2418 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2426 vfsmap = self.vfs_map
2419 2427 # we must avoid cyclic reference between repo and transaction.
2420 2428 reporef = weakref.ref(self)
2421 2429 # Code to track tag movement
2422 2430 #
2423 2431 # Since tags are all handled as file content, it is actually quite hard
2424 2432 # to track these movements from a code perspective. So we fall back to
2425 2433 # tracking at the repository level. One could envision tracking changes
2426 2434 # to the '.hgtags' file through changegroup application, but that fails to
2427 2435 # cope with cases where a transaction exposes new heads without a changegroup
2428 2436 # being involved (eg: phase movement).
2429 2437 #
2430 2438 # For now, we gate the feature behind a flag since this likely comes
2431 2439 # with performance impacts. The current code runs more often than needed
2432 2440 # and does not use caches as much as it could. The current focus is on
2433 2441 # the behavior of the feature so we disable it by default. The flag
2434 2442 # will be removed when we are happy with the performance impact.
2435 2443 #
2436 2444 # Once this feature is no longer experimental move the following
2437 2445 # documentation to the appropriate help section:
2438 2446 #
2439 2447 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2440 2448 # tags (new or changed or deleted tags). In addition the details of
2441 2449 # these changes are made available in a file at:
2442 2450 # ``REPOROOT/.hg/changes/tags.changes``.
2443 2451 # Make sure you check for HG_TAG_MOVED before reading that file as it
2444 2452 # might exist from a previous transaction even if no tags were touched
2445 2453 # in this one. Changes are recorded in a line-based format::
2446 2454 #
2447 2455 # <action> <hex-node> <tag-name>\n
2448 2456 #
2449 2457 # Actions are defined as follows:
2450 2458 # "-R": tag is removed,
2451 2459 # "+A": tag is added,
2452 2460 # "-M": tag is moved (old value),
2453 2461 # "+M": tag is moved (new value),
2454 2462 tracktags = lambda x: None
2455 2463 # experimental config: experimental.hook-track-tags
2456 2464 shouldtracktags = self.ui.configbool(
2457 2465 b'experimental', b'hook-track-tags'
2458 2466 )
2459 2467 if desc != b'strip' and shouldtracktags:
2460 2468 oldheads = self.changelog.headrevs()
2461 2469
2462 2470 def tracktags(tr2):
2463 2471 repo = reporef()
2464 2472 assert repo is not None # help pytype
2465 2473 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2466 2474 newheads = repo.changelog.headrevs()
2467 2475 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2468 2476 # note: we compare lists here.
2469 2477 # As we do it only once, building a set would not be cheaper
2470 2478 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2471 2479 if changes:
2472 2480 tr2.hookargs[b'tag_moved'] = b'1'
2473 2481 with repo.vfs(
2474 2482 b'changes/tags.changes', b'w', atomictemp=True
2475 2483 ) as changesfile:
2476 2484 # note: we do not register the file to the transaction
2477 2485 # because we need it to still exist when the transaction
2478 2486 # is closed (for txnclose hooks)
2479 2487 tagsmod.writediff(changesfile, changes)
2480 2488
2481 2489 def validate(tr2):
2482 2490 """will run pre-closing hooks"""
2483 2491 # XXX the transaction API is a bit lacking here so we take a hacky
2484 2492 # path for now
2485 2493 #
2486 2494 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
2487 2495 # dict is copied before these run. In addition we needs the data
2488 2496 # available to in memory hooks too.
2489 2497 #
2490 2498 # Moreover, we also need to make sure this runs before txnclose
2491 2499 # hooks and there is no "pending" mechanism that would execute
2492 2500 # logic only if hooks are about to run.
2493 2501 #
2494 2502 # Fixing this limitation of the transaction is also needed to track
2495 2503 # other families of changes (bookmarks, phases, obsolescence).
2496 2504 #
2497 2505 # This will have to be fixed before we remove the experimental
2498 2506 # gating.
2499 2507 tracktags(tr2)
2500 2508 repo = reporef()
2501 2509 assert repo is not None # help pytype
2502 2510
2503 2511 singleheadopt = (b'experimental', b'single-head-per-branch')
2504 2512 singlehead = repo.ui.configbool(*singleheadopt)
2505 2513 if singlehead:
2506 2514 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2507 2515 accountclosed = singleheadsub.get(
2508 2516 b"account-closed-heads", False
2509 2517 )
2510 2518 if singleheadsub.get(b"public-changes-only", False):
2511 2519 filtername = b"immutable"
2512 2520 else:
2513 2521 filtername = b"visible"
2514 2522 scmutil.enforcesinglehead(
2515 2523 repo, tr2, desc, accountclosed, filtername
2516 2524 )
2517 2525 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2518 2526 for name, (old, new) in sorted(
2519 2527 tr.changes[b'bookmarks'].items()
2520 2528 ):
2521 2529 args = tr.hookargs.copy()
2522 2530 args.update(bookmarks.preparehookargs(name, old, new))
2523 2531 repo.hook(
2524 2532 b'pretxnclose-bookmark',
2525 2533 throw=True,
2526 2534 **pycompat.strkwargs(args)
2527 2535 )
2528 2536 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2529 2537 cl = repo.unfiltered().changelog
2530 2538 for revs, (old, new) in tr.changes[b'phases']:
2531 2539 for rev in revs:
2532 2540 args = tr.hookargs.copy()
2533 2541 node = hex(cl.node(rev))
2534 2542 args.update(phases.preparehookargs(node, old, new))
2535 2543 repo.hook(
2536 2544 b'pretxnclose-phase',
2537 2545 throw=True,
2538 2546 **pycompat.strkwargs(args)
2539 2547 )
2540 2548
2541 2549 repo.hook(
2542 2550 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2543 2551 )
2544 2552
2545 2553 def releasefn(tr, success):
2546 2554 repo = reporef()
2547 2555 if repo is None:
2548 2556 # If the repo has been GC'd (and this release function is being
2549 2557 # called from transaction.__del__), there's not much we can do,
2550 2558 # so just leave the unfinished transaction there and let the
2551 2559 # user run `hg recover`.
2552 2560 return
2553 2561 if success:
2554 2562 # this should be explicitly invoked here, because
2555 2563 # in-memory changes aren't written out at closing
2556 2564 # transaction, if tr.addfilegenerator (via
2557 2565 # dirstate.write or so) isn't invoked while
2558 2566 # transaction running
2559 2567 repo.dirstate.write(None)
2560 2568 else:
2561 2569 # discard all changes (including ones already written
2562 2570 # out) in this transaction
2563 2571 repo.invalidate(clearfilecache=True)
2564 2572
2565 2573 tr = transaction.transaction(
2566 2574 rp,
2567 2575 self.svfs,
2568 2576 vfsmap,
2569 2577 b"journal",
2570 2578 b"undo",
2571 2579 aftertrans(renames),
2572 2580 self.store.createmode,
2573 2581 validator=validate,
2574 2582 releasefn=releasefn,
2575 2583 checkambigfiles=_cachedfiles,
2576 2584 name=desc,
2577 2585 )
2578 2586 tr.changes[b'origrepolen'] = len(self)
2579 2587 tr.changes[b'obsmarkers'] = set()
2580 2588 tr.changes[b'phases'] = []
2581 2589 tr.changes[b'bookmarks'] = {}
2582 2590
2583 2591 tr.hookargs[b'txnid'] = txnid
2584 2592 tr.hookargs[b'txnname'] = desc
2585 2593 tr.hookargs[b'changes'] = tr.changes
2586 2594 # note: writing the fncache only during finalize means that the file is
2587 2595 # outdated when running hooks. As fncache is used for streaming clone,
2588 2596 # this is not expected to break anything that happens during the hooks.
2589 2597 tr.addfinalize(b'flush-fncache', self.store.write)
2590 2598
2591 2599 def txnclosehook(tr2):
2592 2600 """To be run if transaction is successful, will schedule a hook run"""
2593 2601 # Don't reference tr2 in hook() so we don't hold a reference.
2594 2602 # This reduces memory consumption when there are multiple
2595 2603 # transactions per lock. This can likely go away if issue5045
2596 2604 # fixes the function accumulation.
2597 2605 hookargs = tr2.hookargs
2598 2606
2599 2607 def hookfunc(unused_success):
2600 2608 repo = reporef()
2601 2609 assert repo is not None # help pytype
2602 2610
2603 2611 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2604 2612 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2605 2613 for name, (old, new) in bmchanges:
2606 2614 args = tr.hookargs.copy()
2607 2615 args.update(bookmarks.preparehookargs(name, old, new))
2608 2616 repo.hook(
2609 2617 b'txnclose-bookmark',
2610 2618 throw=False,
2611 2619 **pycompat.strkwargs(args)
2612 2620 )
2613 2621
2614 2622 if hook.hashook(repo.ui, b'txnclose-phase'):
2615 2623 cl = repo.unfiltered().changelog
2616 2624 phasemv = sorted(
2617 2625 tr.changes[b'phases'], key=lambda r: r[0][0]
2618 2626 )
2619 2627 for revs, (old, new) in phasemv:
2620 2628 for rev in revs:
2621 2629 args = tr.hookargs.copy()
2622 2630 node = hex(cl.node(rev))
2623 2631 args.update(phases.preparehookargs(node, old, new))
2624 2632 repo.hook(
2625 2633 b'txnclose-phase',
2626 2634 throw=False,
2627 2635 **pycompat.strkwargs(args)
2628 2636 )
2629 2637
2630 2638 repo.hook(
2631 2639 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2632 2640 )
2633 2641
2634 2642 repo = reporef()
2635 2643 assert repo is not None # help pytype
2636 2644 repo._afterlock(hookfunc)
2637 2645
2638 2646 tr.addfinalize(b'txnclose-hook', txnclosehook)
2639 2647 # Include a leading "-" to make it happen before the transaction summary
2640 2648 # reports registered via scmutil.registersummarycallback() whose names
2641 2649 # are 00-txnreport etc. That way, the caches will be warm when the
2642 2650 # callbacks run.
2643 2651 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2644 2652
2645 2653 def txnaborthook(tr2):
2646 2654 """To be run if transaction is aborted"""
2647 2655 repo = reporef()
2648 2656 assert repo is not None # help pytype
2649 2657 repo.hook(
2650 2658 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2651 2659 )
2652 2660
2653 2661 tr.addabort(b'txnabort-hook', txnaborthook)
2654 2662 # avoid eager cache invalidation. in-memory data should be identical
2655 2663 # to stored data if transaction has no error.
2656 2664 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2657 2665 self._transref = weakref.ref(tr)
2658 2666 scmutil.registersummarycallback(self, tr, desc)
2659 2667 # This only exists to deal with rollback's need to have viable
2660 2668 # parents at the end of the operation. So back up viable parents at the
2661 2669 # time of this operation.
2662 2670 #
2663 2671 # We only do it when the `wlock` is taken, otherwise others might be
2664 2672 # altering the dirstate under us.
2665 2673 #
2666 2674 # This is really not a great way to do this (first, because we cannot
2667 2675 # always do it). There are more viable alternatives that exist:
2668 2676 #
2669 2677 # - backing up only the working copy parents in a dedicated file and doing
2670 2678 # a clean "keep-update" to them on `hg rollback`.
2671 2679 #
2672 2680 # - slightly changing the behavior and applying a logic similar to "hg
2673 2681 # strip" to pick a working copy destination on `hg rollback`
2674 2682 if self.currentwlock() is not None:
2675 2683 ds = self.dirstate
2676 2684 if not self.vfs.exists(b'branch'):
2677 2685 # force a file to be written if none exists
2678 2686 ds.setbranch(b'default', None)
2679 2687
2680 2688 def backup_dirstate(tr):
2681 2689 for f in ds.all_file_names():
2682 2690 # hardlink backup is okay because `dirstate` is always
2683 2691 # atomically written and possible data files are append-only
2684 2692 # and resistant to trailing data.
2685 2693 tr.addbackup(f, hardlink=True, location=b'plain')
2686 2694
2687 2695 tr.addvalidator(b'dirstate-backup', backup_dirstate)
2688 2696 return tr
2689 2697
2690 2698 def _journalfiles(self):
2691 2699 return (
2692 2700 (self.svfs, b'journal'),
2693 2701 (self.vfs, b'journal.desc'),
2694 2702 )
2695 2703
2696 2704 def undofiles(self):
2697 2705 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2698 2706
2699 2707 @unfilteredmethod
2700 2708 def _writejournal(self, desc):
2701 2709 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2702 2710
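# Illustrative example (editorial note, not part of the original source):
# with the "%d\n%s\n" format above, a repository containing 42 revisions that
# opens a "commit" transaction writes a 'journal.desc' file containing:
#
#   42
#   commit
#
# _rollback() later reads the renamed 'undo.desc' counterpart and splits it
# back into (oldlen, desc) via splitlines().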
2703 2711 def recover(self):
2704 2712 with self.lock():
2705 2713 if self.svfs.exists(b"journal"):
2706 2714 self.ui.status(_(b"rolling back interrupted transaction\n"))
2707 vfsmap = {
2708 b'': self.svfs,
2709 b'plain': self.vfs,
2710 }
2715 vfsmap = self.vfs_map
2711 2716 transaction.rollback(
2712 2717 self.svfs,
2713 2718 vfsmap,
2714 2719 b"journal",
2715 2720 self.ui.warn,
2716 2721 checkambigfiles=_cachedfiles,
2717 2722 )
2718 2723 self.invalidate()
2719 2724 return True
2720 2725 else:
2721 2726 self.ui.warn(_(b"no interrupted transaction available\n"))
2722 2727 return False
2723 2728
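# Editorial sketch (not part of the original source): the `self.vfs_map`
# property used by recover() above and _rollback() below replaces the inline
# dict that this change removed. Reconstructed from those removed lines, the
# property is assumed to look roughly like:
#
#   @property
#   def vfs_map(self):
#       return {
#           b'': self.svfs,
#           b'plain': self.vfs,
#       }
#
# The actual definition lives elsewhere in this class; this is only a
# reconstruction, not the authoritative code.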
2724 2729 def rollback(self, dryrun=False, force=False):
2725 2730 wlock = lock = None
2726 2731 try:
2727 2732 wlock = self.wlock()
2728 2733 lock = self.lock()
2729 2734 if self.svfs.exists(b"undo"):
2730 2735 return self._rollback(dryrun, force)
2731 2736 else:
2732 2737 self.ui.warn(_(b"no rollback information available\n"))
2733 2738 return 1
2734 2739 finally:
2735 2740 release(lock, wlock)
2736 2741
2737 2742 @unfilteredmethod # Until we get smarter cache management
2738 2743 def _rollback(self, dryrun, force):
2739 2744 ui = self.ui
2740 2745
2741 2746 parents = self.dirstate.parents()
2742 2747 try:
2743 2748 args = self.vfs.read(b'undo.desc').splitlines()
2744 2749 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2745 2750 if len(args) >= 3:
2746 2751 detail = args[2]
2747 2752 oldtip = oldlen - 1
2748 2753
2749 2754 if detail and ui.verbose:
2750 2755 msg = _(
2751 2756 b'repository tip rolled back to revision %d'
2752 2757 b' (undo %s: %s)\n'
2753 2758 ) % (oldtip, desc, detail)
2754 2759 else:
2755 2760 msg = _(
2756 2761 b'repository tip rolled back to revision %d (undo %s)\n'
2757 2762 ) % (oldtip, desc)
2758 2763 parentgone = any(self[p].rev() > oldtip for p in parents)
2759 2764 except IOError:
2760 2765 msg = _(b'rolling back unknown transaction\n')
2761 2766 desc = None
2762 2767 parentgone = True
2763 2768
2764 2769 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2765 2770 raise error.Abort(
2766 2771 _(
2767 2772 b'rollback of last commit while not checked out '
2768 2773 b'may lose data'
2769 2774 ),
2770 2775 hint=_(b'use -f to force'),
2771 2776 )
2772 2777
2773 2778 ui.status(msg)
2774 2779 if dryrun:
2775 2780 return 0
2776 2781
2777 2782 self.destroying()
2778 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2783 vfsmap = self.vfs_map
2779 2784 skip_journal_pattern = None
2780 2785 if not parentgone:
2781 2786 skip_journal_pattern = RE_SKIP_DIRSTATE_ROLLBACK
2782 2787 transaction.rollback(
2783 2788 self.svfs,
2784 2789 vfsmap,
2785 2790 b'undo',
2786 2791 ui.warn,
2787 2792 checkambigfiles=_cachedfiles,
2788 2793 skip_journal_pattern=skip_journal_pattern,
2789 2794 )
2790 2795 self.invalidate()
2791 2796 self.dirstate.invalidate()
2792 2797
2793 2798 if parentgone:
2794 2799 # replace this with some explicit parent update in the future.
2795 2800 has_node = self.changelog.index.has_node
2796 2801 if not all(has_node(p) for p in self.dirstate._pl):
2797 2802 # There was no dirstate to backup initially, we need to drop
2798 2803 # the existing one.
2799 2804 with self.dirstate.changing_parents(self):
2800 2805 self.dirstate.setparents(self.nullid)
2801 2806 self.dirstate.clear()
2802 2807
2803 2808 parents = tuple([p.rev() for p in self[None].parents()])
2804 2809 if len(parents) > 1:
2805 2810 ui.status(
2806 2811 _(
2807 2812 b'working directory now based on '
2808 2813 b'revisions %d and %d\n'
2809 2814 )
2810 2815 % parents
2811 2816 )
2812 2817 else:
2813 2818 ui.status(
2814 2819 _(b'working directory now based on revision %d\n') % parents
2815 2820 )
2816 2821 mergestatemod.mergestate.clean(self)
2817 2822
2818 2823 # TODO: if we know which new heads may result from this rollback, pass
2819 2824 # them to destroy(), which will prevent the branchhead cache from being
2820 2825 # invalidated.
2821 2826 self.destroyed()
2822 2827 return 0
2823 2828
2824 2829 def _buildcacheupdater(self, newtransaction):
2825 2830 """called during transaction to build the callback updating cache
2826 2831
2827 2832 Lives on the repository to help extensions that might want to augment
2828 2833 this logic. For this purpose, the created transaction is passed to the
2829 2834 method.
2830 2835 """
2831 2836 # we must avoid cyclic reference between repo and transaction.
2832 2837 reporef = weakref.ref(self)
2833 2838
2834 2839 def updater(tr):
2835 2840 repo = reporef()
2836 2841 assert repo is not None # help pytype
2837 2842 repo.updatecaches(tr)
2838 2843
2839 2844 return updater
2840 2845
2841 2846 @unfilteredmethod
2842 2847 def updatecaches(self, tr=None, full=False, caches=None):
2843 2848 """warm appropriate caches
2844 2849
2845 2850 If this function is called after a transaction has closed, the transaction
2846 2851 will be available in the 'tr' argument. This can be used to selectively
2847 2852 update caches relevant to the changes in that transaction.
2848 2853
2849 2854 If 'full' is set, make sure all caches the function knows about have
2850 2855 up-to-date data, even the ones usually loaded more lazily.
2851 2856
2852 2857 The `full` argument can take a special "post-clone" value. In this case
2853 2858 the cache warming happens after a clone, and some of the slower caches might
2854 2859 be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2855 2860 as we plan for a cleaner way to deal with this for 5.9.
2856 2861 """
2857 2862 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2858 2863 # During strip, many caches are invalid but
2859 2864 # later call to `destroyed` will refresh them.
2860 2865 return
2861 2866
2862 2867 unfi = self.unfiltered()
2863 2868
2864 2869 if full:
2865 2870 msg = (
2866 2871 "`full` argument for `repo.updatecaches` is deprecated\n"
2867 2872 "(use `caches=repository.CACHE_ALL` instead)"
2868 2873 )
2869 2874 self.ui.deprecwarn(msg, b"5.9")
2870 2875 caches = repository.CACHES_ALL
2871 2876 if full == b"post-clone":
2872 2877 caches = repository.CACHES_POST_CLONE
2873 2878 caches = repository.CACHES_ALL
2874 2879 elif caches is None:
2875 2880 caches = repository.CACHES_DEFAULT
2876 2881
2877 2882 if repository.CACHE_BRANCHMAP_SERVED in caches:
2878 2883 if tr is None or tr.changes[b'origrepolen'] < len(self):
2879 2884 # accessing the 'served' branchmap should refresh all the others,
2880 2885 self.ui.debug(b'updating the branch cache\n')
2881 2886 self.filtered(b'served').branchmap()
2882 2887 self.filtered(b'served.hidden').branchmap()
2883 2888 # flush all possibly delayed write.
2884 2889 self._branchcaches.write_delayed(self)
2885 2890
2886 2891 if repository.CACHE_CHANGELOG_CACHE in caches:
2887 2892 self.changelog.update_caches(transaction=tr)
2888 2893
2889 2894 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2890 2895 self.manifestlog.update_caches(transaction=tr)
2891 2896
2892 2897 if repository.CACHE_REV_BRANCH in caches:
2893 2898 rbc = unfi.revbranchcache()
2894 2899 for r in unfi.changelog:
2895 2900 rbc.branchinfo(r)
2896 2901 rbc.write()
2897 2902
2898 2903 if repository.CACHE_FULL_MANIFEST in caches:
2899 2904 # ensure the working copy parents are in the manifestfulltextcache
2900 2905 for ctx in self[b'.'].parents():
2901 2906 ctx.manifest() # accessing the manifest is enough
2902 2907
2903 2908 if repository.CACHE_FILE_NODE_TAGS in caches:
2904 2909 # accessing fnode cache warms the cache
2905 2910 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2906 2911
2907 2912 if repository.CACHE_TAGS_DEFAULT in caches:
2908 2913 # accessing tags warm the cache
2909 2914 self.tags()
2910 2915 if repository.CACHE_TAGS_SERVED in caches:
2911 2916 self.filtered(b'served').tags()
2912 2917
2913 2918 if repository.CACHE_BRANCHMAP_ALL in caches:
2914 2919 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2915 2920 # so we're forcing a write to cause these caches to be warmed up
2916 2921 # even if they haven't explicitly been requested yet (if they've
2917 2922 # never been used by hg, they won't ever have been written, even if
2918 2923 # they're a subset of another kind of cache that *has* been used).
2919 2924 for filt in repoview.filtertable.keys():
2920 2925 filtered = self.filtered(filt)
2921 2926 filtered.branchmap().write(filtered)
2922 2927
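# Usage sketch (editorial note, not part of the original source): warming only
# a specific subset of caches with the constants from the `repository`
# interface module used above.
#
#   repo.updatecaches(caches={repository.CACHE_BRANCHMAP_SERVED,
#                             repository.CACHE_TAGS_SERVED})
#
# Calling repo.updatecaches() with no arguments falls back to
# repository.CACHES_DEFAULT, per the `elif caches is None` branch above.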
2923 2928 def invalidatecaches(self):
2924 2929 if '_tagscache' in vars(self):
2925 2930 # can't use delattr on proxy
2926 2931 del self.__dict__['_tagscache']
2927 2932
2928 2933 self._branchcaches.clear()
2929 2934 self.invalidatevolatilesets()
2930 2935 self._sparsesignaturecache.clear()
2931 2936
2932 2937 def invalidatevolatilesets(self):
2933 2938 self.filteredrevcache.clear()
2934 2939 obsolete.clearobscaches(self)
2935 2940 self._quick_access_changeid_invalidate()
2936 2941
2937 2942 def invalidatedirstate(self):
2938 2943 """Invalidates the dirstate, causing the next call to dirstate
2939 2944 to check if it was modified since the last time it was read,
2940 2945 rereading it if it has.
2941 2946
2942 2947 This is different from dirstate.invalidate() in that it doesn't always
2943 2948 reread the dirstate. Use dirstate.invalidate() if you want to
2944 2949 explicitly read the dirstate again (i.e. restoring it to a previous
2945 2950 known good state)."""
2946 2951 unfi = self.unfiltered()
2947 2952 if 'dirstate' in unfi.__dict__:
2948 2953 del unfi.__dict__['dirstate']
2949 2954
2950 2955 def invalidate(self, clearfilecache=False):
2951 2956 """Invalidates both store and non-store parts other than dirstate
2952 2957
2953 2958 If a transaction is running, invalidation of store is omitted,
2954 2959 because discarding in-memory changes might cause inconsistency
2955 2960 (e.g. incomplete fncache causes unintentional failure, but
2956 2961 a redundant one doesn't).
2957 2962 """
2958 2963 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2959 2964 for k in list(self._filecache.keys()):
2960 2965 if (
2961 2966 k == b'changelog'
2962 2967 and self.currenttransaction()
2963 2968 and self.changelog._delayed
2964 2969 ):
2965 2970 # The changelog object may store unwritten revisions. We don't
2966 2971 # want to lose them.
2967 2972 # TODO: Solve the problem instead of working around it.
2968 2973 continue
2969 2974
2970 2975 if clearfilecache:
2971 2976 del self._filecache[k]
2972 2977 try:
2973 2978 delattr(unfiltered, k)
2974 2979 except AttributeError:
2975 2980 pass
2976 2981 self.invalidatecaches()
2977 2982 if not self.currenttransaction():
2978 2983 # TODO: Changing contents of store outside transaction
2979 2984 # causes inconsistency. We should make in-memory store
2980 2985 # changes detectable, and abort if changed.
2981 2986 self.store.invalidatecaches()
2982 2987
2983 2988 def invalidateall(self):
2984 2989 """Fully invalidates both store and non-store parts, causing the
2985 2990 subsequent operation to reread any outside changes."""
2986 2991 # extension should hook this to invalidate its caches
2987 2992 self.invalidate()
2988 2993 self.invalidatedirstate()
2989 2994
2990 2995 @unfilteredmethod
2991 2996 def _refreshfilecachestats(self, tr):
2992 2997 """Reload stats of cached files so that they are flagged as valid"""
2993 2998 for k, ce in self._filecache.items():
2994 2999 k = pycompat.sysstr(k)
2995 3000 if k == 'dirstate' or k not in self.__dict__:
2996 3001 continue
2997 3002 ce.refresh()
2998 3003
2999 3004 def _lock(
3000 3005 self,
3001 3006 vfs,
3002 3007 lockname,
3003 3008 wait,
3004 3009 releasefn,
3005 3010 acquirefn,
3006 3011 desc,
3007 3012 ):
3008 3013 timeout = 0
3009 3014 warntimeout = 0
3010 3015 if wait:
3011 3016 timeout = self.ui.configint(b"ui", b"timeout")
3012 3017 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
3013 3018 # internal config: ui.signal-safe-lock
3014 3019 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
3015 3020
3016 3021 l = lockmod.trylock(
3017 3022 self.ui,
3018 3023 vfs,
3019 3024 lockname,
3020 3025 timeout,
3021 3026 warntimeout,
3022 3027 releasefn=releasefn,
3023 3028 acquirefn=acquirefn,
3024 3029 desc=desc,
3025 3030 signalsafe=signalsafe,
3026 3031 )
3027 3032 return l
3028 3033
3029 3034 def _afterlock(self, callback):
3030 3035 """add a callback to be run when the repository is fully unlocked
3031 3036
3032 3037 The callback will be executed when the outermost lock is released
3033 3038 (with wlock being higher level than 'lock')."""
3034 3039 for ref in (self._wlockref, self._lockref):
3035 3040 l = ref and ref()
3036 3041 if l and l.held:
3037 3042 l.postrelease.append(callback)
3038 3043 break
3039 3044 else: # no lock has been found.
3040 3045 callback(True)
3041 3046
3042 3047 def lock(self, wait=True):
3043 3048 """Lock the repository store (.hg/store) and return a weak reference
3044 3049 to the lock. Use this before modifying the store (e.g. committing or
3045 3050 stripping). If you are opening a transaction, get a lock as well.
3046 3051
3047 3052 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3048 3053 'wlock' first to avoid a dead-lock hazard."""
3049 3054 l = self._currentlock(self._lockref)
3050 3055 if l is not None:
3051 3056 l.lock()
3052 3057 return l
3053 3058
3054 3059 l = self._lock(
3055 3060 vfs=self.svfs,
3056 3061 lockname=b"lock",
3057 3062 wait=wait,
3058 3063 releasefn=None,
3059 3064 acquirefn=self.invalidate,
3060 3065 desc=_(b'repository %s') % self.origroot,
3061 3066 )
3062 3067 self._lockref = weakref.ref(l)
3063 3068 return l
3064 3069
3065 3070 def wlock(self, wait=True):
3066 3071 """Lock the non-store parts of the repository (everything under
3067 3072 .hg except .hg/store) and return a weak reference to the lock.
3068 3073
3069 3074 Use this before modifying files in .hg.
3070 3075
3071 3076 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3072 3077 'wlock' first to avoid a dead-lock hazard."""
3073 3078 l = self._wlockref() if self._wlockref else None
3074 3079 if l is not None and l.held:
3075 3080 l.lock()
3076 3081 return l
3077 3082
3078 3083 # We do not need to check for non-waiting lock acquisition. Such
3079 3084 # acquisition would not cause a dead-lock as it would just fail.
3080 3085 if wait and (
3081 3086 self.ui.configbool(b'devel', b'all-warnings')
3082 3087 or self.ui.configbool(b'devel', b'check-locks')
3083 3088 ):
3084 3089 if self._currentlock(self._lockref) is not None:
3085 3090 self.ui.develwarn(b'"wlock" acquired after "lock"')
3086 3091
3087 3092 def unlock():
3088 3093 if self.dirstate.is_changing_any:
3089 3094 msg = b"wlock release in the middle of a changing parents"
3090 3095 self.ui.develwarn(msg)
3091 3096 self.dirstate.invalidate()
3092 3097 else:
3093 3098 if self.dirstate._dirty:
3094 3099 msg = b"dirty dirstate on wlock release"
3095 3100 self.ui.develwarn(msg)
3096 3101 self.dirstate.write(None)
3097 3102
3098 3103 unfi = self.unfiltered()
3099 3104 if 'dirstate' in unfi.__dict__:
3100 3105 del unfi.__dict__['dirstate']
3101 3106
3102 3107 l = self._lock(
3103 3108 self.vfs,
3104 3109 b"wlock",
3105 3110 wait,
3106 3111 unlock,
3107 3112 self.invalidatedirstate,
3108 3113 _(b'working directory of %s') % self.origroot,
3109 3114 )
3110 3115 self._wlockref = weakref.ref(l)
3111 3116 return l
3112 3117
3113 3118 def _currentlock(self, lockref):
3114 3119 """Returns the lock if it's held, or None if it's not."""
3115 3120 if lockref is None:
3116 3121 return None
3117 3122 l = lockref()
3118 3123 if l is None or not l.held:
3119 3124 return None
3120 3125 return l
3121 3126
3122 3127 def currentwlock(self):
3123 3128 """Returns the wlock if it's held, or None if it's not."""
3124 3129 return self._currentlock(self._wlockref)
3125 3130
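# Usage sketch (editorial note, not part of the original source): the
# docstrings of lock() and wlock() above require taking 'wlock' before 'lock'
# to avoid the dead-lock hazard, which is the order commit() below uses.
#
#   with repo.wlock(), repo.lock():
#       with repo.transaction(b'my-operation'):
#           ...  # mutate store and working copy while both locks are held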
3126 3131 def checkcommitpatterns(self, wctx, match, status, fail):
3127 3132 """check for commit arguments that aren't committable"""
3128 3133 if match.isexact() or match.prefix():
3129 3134 matched = set(status.modified + status.added + status.removed)
3130 3135
3131 3136 for f in match.files():
3132 3137 f = self.dirstate.normalize(f)
3133 3138 if f == b'.' or f in matched or f in wctx.substate:
3134 3139 continue
3135 3140 if f in status.deleted:
3136 3141 fail(f, _(b'file not found!'))
3137 3142 # Is it a directory that exists or used to exist?
3138 3143 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3139 3144 d = f + b'/'
3140 3145 for mf in matched:
3141 3146 if mf.startswith(d):
3142 3147 break
3143 3148 else:
3144 3149 fail(f, _(b"no match under directory!"))
3145 3150 elif f not in self.dirstate:
3146 3151 fail(f, _(b"file not tracked!"))
3147 3152
3148 3153 @unfilteredmethod
3149 3154 def commit(
3150 3155 self,
3151 3156 text=b"",
3152 3157 user=None,
3153 3158 date=None,
3154 3159 match=None,
3155 3160 force=False,
3156 3161 editor=None,
3157 3162 extra=None,
3158 3163 ):
3159 3164 """Add a new revision to current repository.
3160 3165
3161 3166 Revision information is gathered from the working directory,
3162 3167 match can be used to filter the committed files. If editor is
3163 3168 supplied, it is called to get a commit message.
3164 3169 """
3165 3170 if extra is None:
3166 3171 extra = {}
3167 3172
3168 3173 def fail(f, msg):
3169 3174 raise error.InputError(b'%s: %s' % (f, msg))
3170 3175
3171 3176 if not match:
3172 3177 match = matchmod.always()
3173 3178
3174 3179 if not force:
3175 3180 match.bad = fail
3176 3181
3177 3182 # lock() for recent changelog (see issue4368)
3178 3183 with self.wlock(), self.lock():
3179 3184 wctx = self[None]
3180 3185 merge = len(wctx.parents()) > 1
3181 3186
3182 3187 if not force and merge and not match.always():
3183 3188 raise error.Abort(
3184 3189 _(
3185 3190 b'cannot partially commit a merge '
3186 3191 b'(do not specify files or patterns)'
3187 3192 )
3188 3193 )
3189 3194
3190 3195 status = self.status(match=match, clean=force)
3191 3196 if force:
3192 3197 status.modified.extend(
3193 3198 status.clean
3194 3199 ) # mq may commit clean files
3195 3200
3196 3201 # check subrepos
3197 3202 subs, commitsubs, newstate = subrepoutil.precommit(
3198 3203 self.ui, wctx, status, match, force=force
3199 3204 )
3200 3205
3201 3206 # make sure all explicit patterns are matched
3202 3207 if not force:
3203 3208 self.checkcommitpatterns(wctx, match, status, fail)
3204 3209
3205 3210 cctx = context.workingcommitctx(
3206 3211 self, status, text, user, date, extra
3207 3212 )
3208 3213
3209 3214 ms = mergestatemod.mergestate.read(self)
3210 3215 mergeutil.checkunresolved(ms)
3211 3216
3212 3217 # internal config: ui.allowemptycommit
3213 3218 if cctx.isempty() and not self.ui.configbool(
3214 3219 b'ui', b'allowemptycommit'
3215 3220 ):
3216 3221 self.ui.debug(b'nothing to commit, clearing merge state\n')
3217 3222 ms.reset()
3218 3223 return None
3219 3224
3220 3225 if merge and cctx.deleted():
3221 3226 raise error.Abort(_(b"cannot commit merge with missing files"))
3222 3227
3223 3228 if editor:
3224 3229 cctx._text = editor(self, cctx, subs)
3225 3230 edited = text != cctx._text
3226 3231
3227 3232 # Save commit message in case this transaction gets rolled back
3228 3233 # (e.g. by a pretxncommit hook). Leave the content alone on
3229 3234 # the assumption that the user will use the same editor again.
3230 3235 msg_path = self.savecommitmessage(cctx._text)
3231 3236
3232 3237 # commit subs and write new state
3233 3238 if subs:
3234 3239 uipathfn = scmutil.getuipathfn(self)
3235 3240 for s in sorted(commitsubs):
3236 3241 sub = wctx.sub(s)
3237 3242 self.ui.status(
3238 3243 _(b'committing subrepository %s\n')
3239 3244 % uipathfn(subrepoutil.subrelpath(sub))
3240 3245 )
3241 3246 sr = sub.commit(cctx._text, user, date)
3242 3247 newstate[s] = (newstate[s][0], sr)
3243 3248 subrepoutil.writestate(self, newstate)
3244 3249
3245 3250 p1, p2 = self.dirstate.parents()
3246 3251 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3247 3252 try:
3248 3253 self.hook(
3249 3254 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3250 3255 )
3251 3256 with self.transaction(b'commit'):
3252 3257 ret = self.commitctx(cctx, True)
3253 3258 # update bookmarks, dirstate and mergestate
3254 3259 bookmarks.update(self, [p1, p2], ret)
3255 3260 cctx.markcommitted(ret)
3256 3261 ms.reset()
3257 3262 except: # re-raises
3258 3263 if edited:
3259 3264 self.ui.write(
3260 3265 _(b'note: commit message saved in %s\n') % msg_path
3261 3266 )
3262 3267 self.ui.write(
3263 3268 _(
3264 3269 b"note: use 'hg commit --logfile "
3265 3270 b"%s --edit' to reuse it\n"
3266 3271 )
3267 3272 % msg_path
3268 3273 )
3269 3274 raise
3270 3275
3271 3276 def commithook(unused_success):
3272 3277 # hack for commands that use a temporary commit (e.g. histedit):
3273 3278 # the temporary commit got stripped before hook release
3274 3279 if self.changelog.hasnode(ret):
3275 3280 self.hook(
3276 3281 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3277 3282 )
3278 3283
3279 3284 self._afterlock(commithook)
3280 3285 return ret
3281 3286
3282 3287 @unfilteredmethod
3283 3288 def commitctx(self, ctx, error=False, origctx=None):
3284 3289 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3285 3290
3286 3291 @unfilteredmethod
3287 3292 def destroying(self):
3288 3293 """Inform the repository that nodes are about to be destroyed.
3289 3294 Intended for use by strip and rollback, so there's a common
3290 3295 place for anything that has to be done before destroying history.
3291 3296
3292 3297 This is mostly useful for saving state that is in memory and waiting
3293 3298 to be flushed when the current lock is released. Because a call to
3294 3299 destroyed is imminent, the repo will be invalidated causing those
3295 3300 changes to stay in memory (waiting for the next unlock), or vanish
3296 3301 completely.
3297 3302 """
3298 3303 # When using the same lock to commit and strip, the phasecache is left
3299 3304 # dirty after committing. Then when we strip, the repo is invalidated,
3300 3305 # causing those changes to disappear.
3301 3306 if '_phasecache' in vars(self):
3302 3307 self._phasecache.write()
3303 3308
3304 3309 @unfilteredmethod
3305 3310 def destroyed(self):
3306 3311 """Inform the repository that nodes have been destroyed.
3307 3312 Intended for use by strip and rollback, so there's a common
3308 3313 place for anything that has to be done after destroying history.
3309 3314 """
3310 3315 # When one tries to:
3311 3316 # 1) destroy nodes thus calling this method (e.g. strip)
3312 3317 # 2) use phasecache somewhere (e.g. commit)
3313 3318 #
3314 3319 # then 2) will fail because the phasecache contains nodes that were
3315 3320 # removed. We can either remove phasecache from the filecache,
3316 3321 # causing it to reload next time it is accessed, or simply filter
3317 3322 # the removed nodes now and write the updated cache.
3318 3323 self._phasecache.filterunknown(self)
3319 3324 self._phasecache.write()
3320 3325
3321 3326 # refresh all repository caches
3322 3327 self.updatecaches()
3323 3328
3324 3329 # Ensure the persistent tag cache is updated. Doing it now
3325 3330 # means that the tag cache only has to worry about destroyed
3326 3331 # heads immediately after a strip/rollback. That in turn
3327 3332 # guarantees that "cachetip == currenttip" (comparing both rev
3328 3333 # and node) always means no nodes have been added or destroyed.
3329 3334
3330 3335 # XXX this is suboptimal when qrefresh'ing: we strip the current
3331 3336 # head, refresh the tag cache, then immediately add a new head.
3332 3337 # But I think doing it this way is necessary for the "instant
3333 3338 # tag cache retrieval" case to work.
3334 3339 self.invalidate()
3335 3340
3336 3341 def status(
3337 3342 self,
3338 3343 node1=b'.',
3339 3344 node2=None,
3340 3345 match=None,
3341 3346 ignored=False,
3342 3347 clean=False,
3343 3348 unknown=False,
3344 3349 listsubrepos=False,
3345 3350 ):
3346 3351 '''a convenience method that calls node1.status(node2)'''
3347 3352 return self[node1].status(
3348 3353 node2, match, ignored, clean, unknown, listsubrepos
3349 3354 )
3350 3355
3351 3356 def addpostdsstatus(self, ps):
3352 3357 """Add a callback to run within the wlock, at the point at which status
3353 3358 fixups happen.
3354 3359
3355 3360 On status completion, callback(wctx, status) will be called with the
3356 3361 wlock held, unless the dirstate has changed from underneath or the wlock
3357 3362 couldn't be grabbed.
3358 3363
3359 3364 Callbacks should not capture and use a cached copy of the dirstate --
3360 3365 it might change in the meanwhile. Instead, they should access the
3361 3366 dirstate via wctx.repo().dirstate.
3362 3367
3363 3368 This list is emptied out after each status run -- extensions should
3364 3369 make sure they add to this list each time dirstate.status is called.
3365 3370 Extensions should also make sure they don't call this for statuses
3366 3371 that don't involve the dirstate.
3367 3372 """
3368 3373
3369 3374 # The list is located here for uniqueness reasons -- it is actually
3370 3375 # managed by the workingctx, but that isn't unique per-repo.
3371 3376 self._postdsstatus.append(ps)
3372 3377
3373 3378 def postdsstatus(self):
3374 3379 """Used by workingctx to get the list of post-dirstate-status hooks."""
3375 3380 return self._postdsstatus
3376 3381
3377 3382 def clearpostdsstatus(self):
3378 3383 """Used by workingctx to clear post-dirstate-status hooks."""
3379 3384 del self._postdsstatus[:]
3380 3385
3381 3386 def heads(self, start=None):
3382 3387 if start is None:
3383 3388 cl = self.changelog
3384 3389 headrevs = reversed(cl.headrevs())
3385 3390 return [cl.node(rev) for rev in headrevs]
3386 3391
3387 3392 heads = self.changelog.heads(start)
3388 3393 # sort the output in rev descending order
3389 3394 return sorted(heads, key=self.changelog.rev, reverse=True)
3390 3395
3391 3396 def branchheads(self, branch=None, start=None, closed=False):
3392 3397 """return a (possibly filtered) list of heads for the given branch
3393 3398
3394 3399 Heads are returned in topological order, from newest to oldest.
3395 3400 If branch is None, use the dirstate branch.
3396 3401 If start is not None, return only heads reachable from start.
3397 3402 If closed is True, return heads that are marked as closed as well.
3398 3403 """
3399 3404 if branch is None:
3400 3405 branch = self[None].branch()
3401 3406 branches = self.branchmap()
3402 3407 if not branches.hasbranch(branch):
3403 3408 return []
3404 3409 # the cache returns heads ordered lowest to highest
3405 3410 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3406 3411 if start is not None:
3407 3412 # filter out the heads that cannot be reached from startrev
3408 3413 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3409 3414 bheads = [h for h in bheads if h in fbheads]
3410 3415 return bheads
3411 3416
3412 3417 def branches(self, nodes):
3413 3418 if not nodes:
3414 3419 nodes = [self.changelog.tip()]
3415 3420 b = []
3416 3421 for n in nodes:
3417 3422 t = n
3418 3423 while True:
3419 3424 p = self.changelog.parents(n)
3420 3425 if p[1] != self.nullid or p[0] == self.nullid:
3421 3426 b.append((t, n, p[0], p[1]))
3422 3427 break
3423 3428 n = p[0]
3424 3429 return b
3425 3430
3426 3431 def between(self, pairs):
3427 3432 r = []
3428 3433
3429 3434 for top, bottom in pairs:
3430 3435 n, l, i = top, [], 0
3431 3436 f = 1
3432 3437
3433 3438 while n != bottom and n != self.nullid:
3434 3439 p = self.changelog.parents(n)[0]
3435 3440 if i == f:
3436 3441 l.append(n)
3437 3442 f = f * 2
3438 3443 n = p
3439 3444 i += 1
3440 3445
3441 3446 r.append(l)
3442 3447
3443 3448 return r
3444 3449
3445 3450 def checkpush(self, pushop):
3446 3451 """Extensions can override this function if additional checks have
3447 3452 to be performed before pushing, or call it if they override push
3448 3453 command.
3449 3454 """
3450 3455
3451 3456 @unfilteredpropertycache
3452 3457 def prepushoutgoinghooks(self):
3453 3458 """Return util.hooks consists of a pushop with repo, remote, outgoing
3454 3459 methods, which are called before pushing changesets.
3455 3460 """
3456 3461 return util.hooks()
3457 3462
3458 3463 def pushkey(self, namespace, key, old, new):
3459 3464 try:
3460 3465 tr = self.currenttransaction()
3461 3466 hookargs = {}
3462 3467 if tr is not None:
3463 3468 hookargs.update(tr.hookargs)
3464 3469 hookargs = pycompat.strkwargs(hookargs)
3465 3470 hookargs['namespace'] = namespace
3466 3471 hookargs['key'] = key
3467 3472 hookargs['old'] = old
3468 3473 hookargs['new'] = new
3469 3474 self.hook(b'prepushkey', throw=True, **hookargs)
3470 3475 except error.HookAbort as exc:
3471 3476 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3472 3477 if exc.hint:
3473 3478 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3474 3479 return False
3475 3480 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3476 3481 ret = pushkey.push(self, namespace, key, old, new)
3477 3482
3478 3483 def runhook(unused_success):
3479 3484 self.hook(
3480 3485 b'pushkey',
3481 3486 namespace=namespace,
3482 3487 key=key,
3483 3488 old=old,
3484 3489 new=new,
3485 3490 ret=ret,
3486 3491 )
3487 3492
3488 3493 self._afterlock(runhook)
3489 3494 return ret
3490 3495
3491 3496 def listkeys(self, namespace):
3492 3497 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3493 3498 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3494 3499 values = pushkey.list(self, namespace)
3495 3500 self.hook(b'listkeys', namespace=namespace, values=values)
3496 3501 return values
3497 3502
3498 3503 def debugwireargs(self, one, two, three=None, four=None, five=None):
3499 3504 '''used to test argument passing over the wire'''
3500 3505 return b"%s %s %s %s %s" % (
3501 3506 one,
3502 3507 two,
3503 3508 pycompat.bytestr(three),
3504 3509 pycompat.bytestr(four),
3505 3510 pycompat.bytestr(five),
3506 3511 )
3507 3512
3508 3513 def savecommitmessage(self, text):
3509 3514 fp = self.vfs(b'last-message.txt', b'wb')
3510 3515 try:
3511 3516 fp.write(text)
3512 3517 finally:
3513 3518 fp.close()
3514 3519 return self.pathto(fp.name[len(self.root) + 1 :])
3515 3520
3516 3521 def register_wanted_sidedata(self, category):
3517 3522 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3518 3523 # Only revlogv2 repos can want sidedata.
3519 3524 return
3520 3525 self._wanted_sidedata.add(pycompat.bytestr(category))
3521 3526
3522 3527 def register_sidedata_computer(
3523 3528 self, kind, category, keys, computer, flags, replace=False
3524 3529 ):
3525 3530 if kind not in revlogconst.ALL_KINDS:
3526 3531 msg = _(b"unexpected revlog kind '%s'.")
3527 3532 raise error.ProgrammingError(msg % kind)
3528 3533 category = pycompat.bytestr(category)
3529 3534 already_registered = category in self._sidedata_computers.get(kind, [])
3530 3535 if already_registered and not replace:
3531 3536 msg = _(
3532 3537 b"cannot register a sidedata computer twice for category '%s'."
3533 3538 )
3534 3539 raise error.ProgrammingError(msg % category)
3535 3540 if replace and not already_registered:
3536 3541 msg = _(
3537 3542 b"cannot replace a sidedata computer that isn't registered "
3538 3543 b"for category '%s'."
3539 3544 )
3540 3545 raise error.ProgrammingError(msg % category)
3541 3546 self._sidedata_computers.setdefault(kind, {})
3542 3547 self._sidedata_computers[kind][category] = (keys, computer, flags)
3543 3548
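# Registration sketch (editorial note, not part of the original source). The
# revlog-kind constant and the computer's call signature below are assumptions
# for illustration, not taken from this file; only the
# register_sidedata_computer() parameters shown above are.
#
#   def my_computer(repo, revlog, rev, sidedata):  # hypothetical signature
#       return sidedata, (0, 0)
#
#   repo.register_sidedata_computer(
#       revlogconst.KIND_CHANGELOG,  # assumed member of revlogconst.ALL_KINDS
#       b'exp-my-category',
#       keys={b'exp-my-category'},
#       computer=my_computer,
#       flags=0,
#   )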
3544 3549
3545 3550 # used to avoid circular references so destructors work
3546 3551 def aftertrans(files):
3547 3552 renamefiles = [tuple(t) for t in files]
3548 3553
3549 3554 def a():
3550 3555 for vfs, src, dest in renamefiles:
3551 3556 # if src and dest refer to a same file, vfs.rename is a no-op,
3552 3557 # leaving both src and dest on disk. delete dest to make sure
3553 3558 # the rename couldn't be such a no-op.
3554 3559 vfs.tryunlink(dest)
3555 3560 try:
3556 3561 vfs.rename(src, dest)
3557 3562 except FileNotFoundError: # journal file does not yet exist
3558 3563 pass
3559 3564
3560 3565 return a
3561 3566
3562 3567
3563 3568 def undoname(fn: bytes) -> bytes:
3564 3569 base, name = os.path.split(fn)
3565 3570 assert name.startswith(b'journal')
3566 3571 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3567 3572
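# Worked example (editorial note, not part of the original source), following
# the replace-first-'journal' rule implemented above:
#
#   undoname(b'.hg/store/journal') -> b'.hg/store/undo'
#   undoname(b'.hg/journal.desc')  -> b'.hg/undo.desc'
#
# This is how undofiles() earlier in this file derives undo file names from
# _journalfiles().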
3568 3573
3569 3574 def instance(ui, path: bytes, create, intents=None, createopts=None):
3570 3575 # prevent cyclic import localrepo -> upgrade -> localrepo
3571 3576 from . import upgrade
3572 3577
3573 3578 localpath = urlutil.urllocalpath(path)
3574 3579 if create:
3575 3580 createrepository(ui, localpath, createopts=createopts)
3576 3581
3577 3582 def repo_maker():
3578 3583 return makelocalrepository(ui, localpath, intents=intents)
3579 3584
3580 3585 repo = repo_maker()
3581 3586 repo = upgrade.may_auto_upgrade(repo, repo_maker)
3582 3587 return repo
3583 3588
3584 3589
3585 3590 def islocal(path: bytes) -> bool:
3586 3591 return True
3587 3592
3588 3593
3589 3594 def defaultcreateopts(ui, createopts=None):
3590 3595 """Populate the default creation options for a repository.
3591 3596
3592 3597 A dictionary of explicitly requested creation options can be passed
3593 3598 in. Missing keys will be populated.
3594 3599 """
3595 3600 createopts = dict(createopts or {})
3596 3601
3597 3602 if b'backend' not in createopts:
3598 3603 # experimental config: storage.new-repo-backend
3599 3604 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3600 3605
3601 3606 return createopts
3602 3607
3603 3608
3604 3609 def clone_requirements(ui, createopts, srcrepo):
3605 3610 """clone the requirements of a local repo for a local clone
3606 3611
3607 3612 The store requirements are unchanged while the working copy requirements
3608 3613 depends on the configuration
3609 3614 """
3610 3615 target_requirements = set()
3611 3616 if not srcrepo.requirements:
3612 3617 # this is a legacy revlog "v0" repository, we cannot do anything fancy
3613 3618 # with it.
3614 3619 return target_requirements
3615 3620 createopts = defaultcreateopts(ui, createopts=createopts)
3616 3621 for r in newreporequirements(ui, createopts):
3617 3622 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
3618 3623 target_requirements.add(r)
3619 3624
3620 3625 for r in srcrepo.requirements:
3621 3626 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
3622 3627 target_requirements.add(r)
3623 3628 return target_requirements
3624 3629
3625 3630
3626 3631 def newreporequirements(ui, createopts):
3627 3632 """Determine the set of requirements for a new local repository.
3628 3633
3629 3634 Extensions can wrap this function to specify custom requirements for
3630 3635 new repositories.
3631 3636 """
3632 3637
3633 3638 if b'backend' not in createopts:
3634 3639 raise error.ProgrammingError(
3635 3640 b'backend key not present in createopts; '
3636 3641 b'was defaultcreateopts() called?'
3637 3642 )
3638 3643
3639 3644 if createopts[b'backend'] != b'revlogv1':
3640 3645 raise error.Abort(
3641 3646 _(
3642 3647 b'unable to determine repository requirements for '
3643 3648 b'storage backend: %s'
3644 3649 )
3645 3650 % createopts[b'backend']
3646 3651 )
3647 3652
3648 3653 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3649 3654 if ui.configbool(b'format', b'usestore'):
3650 3655 requirements.add(requirementsmod.STORE_REQUIREMENT)
3651 3656 if ui.configbool(b'format', b'usefncache'):
3652 3657 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3653 3658 if ui.configbool(b'format', b'dotencode'):
3654 3659 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3655 3660
3656 3661 compengines = ui.configlist(b'format', b'revlog-compression')
3657 3662 for compengine in compengines:
3658 3663 if compengine in util.compengines:
3659 3664 engine = util.compengines[compengine]
3660 3665 if engine.available() and engine.revlogheader():
3661 3666 break
3662 3667 else:
3663 3668 raise error.Abort(
3664 3669 _(
3665 3670 b'compression engines %s defined by '
3666 3671 b'format.revlog-compression not available'
3667 3672 )
3668 3673 % b', '.join(b'"%s"' % e for e in compengines),
3669 3674 hint=_(
3670 3675 b'run "hg debuginstall" to list available '
3671 3676 b'compression engines'
3672 3677 ),
3673 3678 )
3674 3679
3675 3680 # zlib is the historical default and doesn't need an explicit requirement.
3676 3681 if compengine == b'zstd':
3677 3682 requirements.add(b'revlog-compression-zstd')
3678 3683 elif compengine != b'zlib':
3679 3684 requirements.add(b'exp-compression-%s' % compengine)
3680 3685
3681 3686 if scmutil.gdinitconfig(ui):
3682 3687 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3683 3688 if ui.configbool(b'format', b'sparse-revlog'):
3684 3689 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3685 3690
3686 3691 # experimental config: format.use-dirstate-v2
3687 3692 # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
3688 3693 if ui.configbool(b'format', b'use-dirstate-v2'):
3689 3694 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3690 3695
3691 3696 # experimental config: format.exp-use-copies-side-data-changeset
3692 3697 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3693 3698 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3694 3699 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3695 3700 if ui.configbool(b'experimental', b'treemanifest'):
3696 3701 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3697 3702
3698 3703 changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
3699 3704 if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
3700 3705 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3701 3706
3702 3707 revlogv2 = ui.config(b'experimental', b'revlogv2')
3703 3708 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3704 3709 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3705 3710 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3706 3711 # experimental config: format.internal-phase
3707 3712 if ui.configbool(b'format', b'use-internal-phase'):
3708 3713 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3709 3714
3710 3715 # experimental config: format.exp-archived-phase
3711 3716 if ui.configbool(b'format', b'exp-archived-phase'):
3712 3717 requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)
3713 3718
3714 3719 if createopts.get(b'narrowfiles'):
3715 3720 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3716 3721
3717 3722 if createopts.get(b'lfs'):
3718 3723 requirements.add(b'lfs')
3719 3724
3720 3725 if ui.configbool(b'format', b'bookmarks-in-store'):
3721 3726 requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3722 3727
3723 3728 if ui.configbool(b'format', b'use-persistent-nodemap'):
3724 3729 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3725 3730
3726 3731 # if share-safe is enabled, let's create the new repository with the new
3727 3732 # requirement
3728 3733 if ui.configbool(b'format', b'use-share-safe'):
3729 3734 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3730 3735
3731 3736 # if we are creating a share-repo¹ we have to handle requirements
3732 3737 # differently.
3733 3738 #
3734 3739 # [1] (i.e. reusing the store from another repository, just having a
3735 3740 # working copy)
3736 3741 if b'sharedrepo' in createopts:
3737 3742 source_requirements = set(createopts[b'sharedrepo'].requirements)
3738 3743
3739 3744 if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
3740 3745 # share to an old school repository, we have to copy the
3741 3746 # requirements and hope for the best.
3742 3747 requirements = source_requirements
3743 3748 else:
3744 3749 # We have control over the working copy only, so "copy" the non-
3745 3750 # working copy part over, ignoring previous logic.
3746 3751 to_drop = set()
3747 3752 for req in requirements:
3748 3753 if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
3749 3754 continue
3750 3755 if req in source_requirements:
3751 3756 continue
3752 3757 to_drop.add(req)
3753 3758 requirements -= to_drop
3754 3759 requirements |= source_requirements
3755 3760
3756 3761 if createopts.get(b'sharedrelative'):
3757 3762 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3758 3763 else:
3759 3764 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3760 3765
3761 3766 if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
3762 3767 version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
3763 3768 msg = _(b"ignoring unknown tracked key version: %d\n")
3764 3769 hint = _(
3765 3770 b"see `hg help config.format.use-dirstate-tracked-hint-version"
3766 3771 )
3767 3772 if version != 1:
3768 3773 ui.warn(msg % version, hint=hint)
3769 3774 else:
3770 3775 requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)
3771 3776
3772 3777 return requirements
3773 3778
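# Usage sketch (editorial note, not part of the original source): computing
# the requirement set for a fresh repository from a ui instance with the
# helpers defined above.
#
#   createopts = defaultcreateopts(ui)
#   reqs = newreporequirements(ui, createopts)
#   reqs -= checkrequirementscompat(ui, reqs)
#
# This mirrors the sequence createrepository() below runs before writing the
# 'requires' files.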
3774 3779
3775 3780 def checkrequirementscompat(ui, requirements):
3776 3781 """Checks compatibility of repository requirements enabled and disabled.
3777 3782
3778 3783 Returns a set of requirements which need to be dropped because dependent
3779 3784 requirements are not enabled. Also warns users about it."""
3780 3785
3781 3786 dropped = set()
3782 3787
3783 3788 if requirementsmod.STORE_REQUIREMENT not in requirements:
3784 3789 if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3785 3790 ui.warn(
3786 3791 _(
3787 3792 b'ignoring enabled \'format.bookmarks-in-store\' config '
3788 3793 b'because it is incompatible with disabled '
3789 3794 b'\'format.usestore\' config\n'
3790 3795 )
3791 3796 )
3792 3797 dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3793 3798
3794 3799 if (
3795 3800 requirementsmod.SHARED_REQUIREMENT in requirements
3796 3801 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3797 3802 ):
3798 3803 raise error.Abort(
3799 3804 _(
3800 3805 b"cannot create shared repository as source was created"
3801 3806 b" with 'format.usestore' config disabled"
3802 3807 )
3803 3808 )
3804 3809
3805 3810 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3806 3811 if ui.hasconfig(b'format', b'use-share-safe'):
3807 3812 msg = _(
3808 3813 b"ignoring enabled 'format.use-share-safe' config because "
3809 3814 b"it is incompatible with disabled 'format.usestore'"
3810 3815 b" config\n"
3811 3816 )
3812 3817 ui.warn(msg)
3813 3818 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3814 3819
3815 3820 return dropped
3816 3821
3817 3822
3818 3823 def filterknowncreateopts(ui, createopts):
3819 3824 """Filters a dict of repo creation options against options that are known.
3820 3825
3821 3826 Receives a dict of repo creation options and returns a dict of those
3822 3827 options that we don't know how to handle.
3823 3828
3824 3829 This function is called as part of repository creation. If the
3825 3830 returned dict contains any items, repository creation will not
3826 3831 be allowed, as it means there was a request to create a repository
3827 3832 with options not recognized by loaded code.
3828 3833
3829 3834 Extensions can wrap this function to filter out creation options
3830 3835 they know how to handle.
3831 3836 """
3832 3837 known = {
3833 3838 b'backend',
3834 3839 b'lfs',
3835 3840 b'narrowfiles',
3836 3841 b'sharedrepo',
3837 3842 b'sharedrelative',
3838 3843 b'shareditems',
3839 3844 b'shallowfilestore',
3840 3845 }
3841 3846
3842 3847 return {k: v for k, v in createopts.items() if k not in known}
3843 3848
3844 3849
3845 3850 def createrepository(ui, path: bytes, createopts=None, requirements=None):
3846 3851 """Create a new repository in a vfs.
3847 3852
3848 3853 ``path`` path to the new repo's working directory.
3849 3854 ``createopts`` options for the new repository.
3851 3856 ``requirements`` predefined set of requirements.
3851 3856 (incompatible with ``createopts``)
3852 3857
3853 3858 The following keys for ``createopts`` are recognized:
3854 3859
3855 3860 backend
3856 3861 The storage backend to use.
3857 3862 lfs
3858 3863 Repository will be created with ``lfs`` requirement. The lfs extension
3859 3864 will automatically be loaded when the repository is accessed.
3860 3865 narrowfiles
3861 3866 Set up repository to support narrow file storage.
3862 3867 sharedrepo
3863 3868 Repository object from which storage should be shared.
3864 3869 sharedrelative
3865 3870 Boolean indicating if the path to the shared repo should be
3866 3871 stored as relative. By default, the pointer to the "parent" repo
3867 3872 is stored as an absolute path.
3868 3873 shareditems
3869 3874 Set of items to share to the new repository (in addition to storage).
3870 3875 shallowfilestore
3871 3876 Indicates that storage for files should be shallow (not all ancestor
3872 3877 revisions are known).
3873 3878 """
3874 3879
3875 3880 if requirements is not None:
3876 3881 if createopts is not None:
3877 3882 msg = b'cannot specify both createopts and requirements'
3878 3883 raise error.ProgrammingError(msg)
3879 3884 createopts = {}
3880 3885 else:
3881 3886 createopts = defaultcreateopts(ui, createopts=createopts)
3882 3887
3883 3888 unknownopts = filterknowncreateopts(ui, createopts)
3884 3889
3885 3890 if not isinstance(unknownopts, dict):
3886 3891 raise error.ProgrammingError(
3887 3892 b'filterknowncreateopts() did not return a dict'
3888 3893 )
3889 3894
3890 3895 if unknownopts:
3891 3896 raise error.Abort(
3892 3897 _(
3893 3898 b'unable to create repository because of unknown '
3894 3899 b'creation option: %s'
3895 3900 )
3896 3901 % b', '.join(sorted(unknownopts)),
3897 3902 hint=_(b'is a required extension not loaded?'),
3898 3903 )
3899 3904
3900 3905 requirements = newreporequirements(ui, createopts=createopts)
3901 3906 requirements -= checkrequirementscompat(ui, requirements)
3902 3907
3903 3908 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3904 3909
3905 3910 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3906 3911 if hgvfs.exists():
3907 3912 raise error.RepoError(_(b'repository %s already exists') % path)
3908 3913
3909 3914 if b'sharedrepo' in createopts:
3910 3915 sharedpath = createopts[b'sharedrepo'].sharedpath
3911 3916
3912 3917 if createopts.get(b'sharedrelative'):
3913 3918 try:
3914 3919 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3915 3920 sharedpath = util.pconvert(sharedpath)
3916 3921 except (IOError, ValueError) as e:
3917 3922 # ValueError is raised on Windows if the drive letters differ
3918 3923 # on each path.
3919 3924 raise error.Abort(
3920 3925 _(b'cannot calculate relative path'),
3921 3926 hint=stringutil.forcebytestr(e),
3922 3927 )
3923 3928
3924 3929 if not wdirvfs.exists():
3925 3930 wdirvfs.makedirs()
3926 3931
3927 3932 hgvfs.makedir(notindexed=True)
3928 3933 if b'sharedrepo' not in createopts:
3929 3934 hgvfs.mkdir(b'cache')
3930 3935 hgvfs.mkdir(b'wcache')
3931 3936
3932 3937 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3933 3938 if has_store and b'sharedrepo' not in createopts:
3934 3939 hgvfs.mkdir(b'store')
3935 3940
3936 3941 # We create an invalid changelog outside the store so very old
3937 3942 # Mercurial versions (which didn't know about the requirements
3938 3943 # file) encounter an error on reading the changelog. This
3939 3944 # effectively locks out old clients and prevents them from
3940 3945 # mucking with a repo in an unknown format.
3941 3946 #
3942 3947 # The revlog header has version 65535, which won't be recognized by
3943 3948 # such old clients.
3944 3949 hgvfs.append(
3945 3950 b'00changelog.i',
3946 3951 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3947 3952 b'layout',
3948 3953 )
3949 3954
3950 3955 # Filter the requirements into working copy and store ones
3951 3956 wcreq, storereq = scmutil.filterrequirements(requirements)
3952 3957 # write working copy ones
3953 3958 scmutil.writerequires(hgvfs, wcreq)
3954 3959 # If there are store requirements and the current repository
3955 3960 # is not a shared one, write the store requirements.
3956 3961 # For a new shared repository, we don't need to write the store
3957 3962 # requirements as they are already present in the store's requires
3958 3963 if storereq and b'sharedrepo' not in createopts:
3959 3964 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3960 3965 scmutil.writerequires(storevfs, storereq)
3961 3966
3962 3967 # Write out file telling readers where to find the shared store.
3963 3968 if b'sharedrepo' in createopts:
3964 3969 hgvfs.write(b'sharedpath', sharedpath)
3965 3970
3966 3971 if createopts.get(b'shareditems'):
3967 3972 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3968 3973 hgvfs.write(b'shared', shared)
3969 3974
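# Usage sketch (editorial note, not part of the original source): creating a
# new repository with one of the createopts keys documented in the docstring
# above. The path is a made-up example; ui.load() comes from mercurial.ui.
#
#   from mercurial import ui as uimod
#
#   createrepository(
#       uimod.ui.load(),
#       b'/tmp/example-repo',
#       createopts={b'lfs': True},
#   )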
3970 3975
3971 3976 def poisonrepository(repo):
3972 3977 """Poison a repository instance so it can no longer be used."""
3973 3978 # Perform any cleanup on the instance.
3974 3979 repo.close()
3975 3980
3976 3981 # Our strategy is to replace the type of the object with one that
3977 3982 # has all attribute lookups result in error.
3978 3983 #
3979 3984 # But we have to allow the close() method because some constructors
3980 3985 # of repos call close() on repo references.
3981 3986 class poisonedrepository:
3982 3987 def __getattribute__(self, item):
3983 3988 if item == 'close':
3984 3989 return object.__getattribute__(self, item)
3985 3990
3986 3991 raise error.ProgrammingError(
3987 3992 b'repo instances should not be used after unshare'
3988 3993 )
3989 3994
3990 3995 def close(self):
3991 3996 pass
3992 3997
3993 3998 # We may have a repoview, which intercepts __setattr__. So be sure
3994 3999 # we operate at the lowest level possible.
3995 4000 object.__setattr__(repo, '__class__', poisonedrepository)