repository: introduce constant for sparse repo requirement and use it...
Pulkit Goyal
r45914:a1f51c7d default
@@ -1,1975 +1,1978
# repository.py - Interfaces and base classes for repositories and peers.
#
# Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from ..i18n import _
from .. import error
from . import util as interfaceutil

# When narrowing is finalized and no longer subject to format changes,
# we should move this to just "narrow" or similar.
NARROW_REQUIREMENT = b'narrowhg-experimental'

# Enables sparse working directory usage
SPARSE_REQUIREMENT = b'exp-sparse'

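# Hedged sketch (illustrative only, not part of this module): requirement
# constants such as SPARSE_REQUIREMENT are typically tested against a
# repository's ``requirements`` set; ``repo`` is assumed to conform to the
# ``ilocalrepositorymain`` interface defined later in this file.
def _example_is_sparse(repo):
    """Return True if ``repo`` carries the experimental sparse requirement."""
    return SPARSE_REQUIREMENT in repo.requirements
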
# Local repository feature string.

# Revlogs are being used for file storage.
REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
# The storage part of the repository is shared from an external source.
REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
# LFS supported for backing file storage.
REPO_FEATURE_LFS = b'lfs'
# Repository supports being stream cloned.
REPO_FEATURE_STREAM_CLONE = b'streamclone'
# Files storage may lack data for all ancestors.
REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'

REVISION_FLAG_CENSORED = 1 << 15
REVISION_FLAG_ELLIPSIS = 1 << 14
REVISION_FLAG_EXTSTORED = 1 << 13
REVISION_FLAG_SIDEDATA = 1 << 12

REVISION_FLAGS_KNOWN = (
    REVISION_FLAG_CENSORED
    | REVISION_FLAG_ELLIPSIS
    | REVISION_FLAG_EXTSTORED
    | REVISION_FLAG_SIDEDATA
)

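# Hedged sketch (illustrative only): since REVISION_FLAGS_KNOWN is the
# bitwise union of the flags above, masking a flags field with its complement
# exposes any flag bits this code does not understand.
def _example_unknown_flags(flags):
    """Return the bits in ``flags`` not covered by REVISION_FLAGS_KNOWN."""
    return flags & ~REVISION_FLAGS_KNOWN
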
CG_DELTAMODE_STD = b'default'
CG_DELTAMODE_PREV = b'previous'
CG_DELTAMODE_FULL = b'fulltext'
CG_DELTAMODE_P1 = b'p1'


class ipeerconnection(interfaceutil.Interface):
    """Represents a "connection" to a repository.

    This is the base interface for representing a connection to a repository.
    It holds basic properties and methods applicable to all peer types.

    This is not a complete interface definition and should not be used
    outside of this module.
    """

    ui = interfaceutil.Attribute("""ui.ui instance""")

    def url():
        """Returns a URL string representing this peer.

        Currently, implementations expose the raw URL used to construct the
        instance. It may contain credentials as part of the URL. The
        expectations of the value aren't well-defined and this could lead to
        data leakage.

        TODO audit/clean consumers and more clearly define the contents of this
        value.
        """

    def local():
        """Returns a local repository instance.

        If the peer represents a local repository, returns an object that
        can be used to interface with it. Otherwise returns ``None``.
        """

    def peer():
        """Returns an object conforming to this interface.

        Most implementations will ``return self``.
        """

    def canpush():
        """Returns a boolean indicating if this peer can be pushed to."""

    def close():
        """Close the connection to this peer.

        This is called when the peer will no longer be used. Resources
        associated with the peer should be cleaned up.
        """


class ipeercapabilities(interfaceutil.Interface):
    """Peer sub-interface related to capabilities."""

    def capable(name):
        """Determine support for a named capability.

        Returns ``False`` if capability not supported.

        Returns ``True`` if boolean capability is supported. Returns a string
        if capability support is non-boolean.

        Capability strings may or may not map to wire protocol capabilities.
        """

    def requirecap(name, purpose):
        """Require a capability to be present.

        Raises a ``CapabilityError`` if the capability isn't present.
        """


class ipeercommands(interfaceutil.Interface):
    """Client-side interface for communicating over the wire protocol.

    This interface is used as a gateway to the Mercurial wire protocol.
    Methods commonly call wire protocol commands of the same name.
    """

    def branchmap():
        """Obtain heads in named branches.

        Returns a dict mapping branch name to an iterable of nodes that are
        heads on that branch.
        """

    def capabilities():
        """Obtain capabilities of the peer.

        Returns a set of string capabilities.
        """

    def clonebundles():
        """Obtains the clone bundles manifest for the repo.

        Returns the manifest as unparsed bytes.
        """

    def debugwireargs(one, two, three=None, four=None, five=None):
        """Used to facilitate debugging of arguments passed over the wire."""

    def getbundle(source, **kwargs):
        """Obtain remote repository data as a bundle.

        This command is how the bulk of repository data is transferred from
        the peer to the local repository.

        Returns a generator of bundle data.
        """

    def heads():
        """Determine all known head revisions in the peer.

        Returns an iterable of binary nodes.
        """

    def known(nodes):
        """Determine whether multiple nodes are known.

        Accepts an iterable of nodes whose presence to check for.

        Returns an iterable of booleans indicating whether the corresponding
        node at that index is known to the peer.
        """

    def listkeys(namespace):
        """Obtain all keys in a pushkey namespace.

        Returns an iterable of key names.
        """

    def lookup(key):
        """Resolve a value to a known revision.

        Returns a binary node of the resolved revision on success.
        """

    def pushkey(namespace, key, old, new):
        """Set a value using the ``pushkey`` protocol.

        Arguments correspond to the pushkey namespace and key to operate on and
        the old and new values for that key.

        Returns a string with the peer result. The value inside varies by the
        namespace.
        """

    def stream_out():
        """Obtain streaming clone data.

        Successful result should be a generator of data chunks.
        """

    def unbundle(bundle, heads, url):
        """Transfer repository data to the peer.

        This is how the bulk of data during a push is transferred.

        Returns the integer number of heads added to the peer.
        """


class ipeerlegacycommands(interfaceutil.Interface):
    """Interface for implementing support for legacy wire protocol commands.

    Wire protocol commands transition to legacy status when they are no longer
    used by modern clients. To facilitate identifying which commands are
    legacy, the interfaces are split.
    """

    def between(pairs):
        """Obtain nodes between pairs of nodes.

        ``pairs`` is an iterable of node pairs.

        Returns an iterable of iterables of nodes corresponding to each
        requested pair.
        """

    def branches(nodes):
        """Obtain ancestor changesets of specific nodes back to a branch point.

        For each requested node, the peer finds the first ancestor node that is
        a DAG root or is a merge.

        Returns an iterable of iterables with the resolved values for each node.
        """

    def changegroup(nodes, source):
        """Obtain a changegroup with data for descendants of specified nodes."""

    def changegroupsubset(bases, heads, source):
        pass


class ipeercommandexecutor(interfaceutil.Interface):
    """Represents a mechanism to execute remote commands.

    This is the primary interface for requesting that wire protocol commands
    be executed. Instances of this interface are active in a context manager
    and have a well-defined lifetime. When the context manager exits, all
    outstanding requests are waited on.
    """

    def callcommand(name, args):
        """Request that a named command be executed.

        Receives the command name and a dictionary of command arguments.

        Returns a ``concurrent.futures.Future`` that will resolve to the
        result of that command request. That exact value is left up to
        the implementation and possibly varies by command.

        Not all commands can coexist with other commands in an executor
        instance: it depends on the underlying wire protocol transport being
        used and the command itself.

        Implementations MAY call ``sendcommands()`` automatically if the
        requested command cannot coexist with other commands in this executor.

        Implementations MAY call ``sendcommands()`` automatically when the
        future's ``result()`` is called. So, consumers using multiple
        commands with an executor MUST ensure that ``result()`` is not called
        until all command requests have been issued.
        """

    def sendcommands():
        """Trigger submission of queued command requests.

        Not all transports submit commands as soon as they are requested to
        run. When called, this method forces queued command requests to be
        issued. It will no-op if all commands have already been sent.

        When called, no more new commands may be issued with this executor.
        """

    def close():
        """Signal that this command request is finished.

        When called, no more new commands may be issued. All outstanding
        commands that have previously been issued are waited on before
        returning. This not only includes waiting for the futures to resolve,
        but also waiting for all response data to arrive. In other words,
        calling this waits for all on-wire state for issued command requests
        to finish.

        When used as a context manager, this method is called when exiting the
        context manager.

        This method may call ``sendcommands()`` if there are buffered commands.
        """

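# Hedged sketch (illustrative only): the lifecycle described above - issue
# every command first, then resolve the futures once the executor exits.
# ``peer`` is assumed to conform to ``ipeerrequests`` below.
def _example_executor_usage(peer):
    with peer.commandexecutor() as executor:
        heads = executor.callcommand(b'heads', {})
        known = executor.callcommand(b'known', {b'nodes': []})
    # All outstanding requests have been waited on at this point.
    return heads.result(), known.result()
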

class ipeerrequests(interfaceutil.Interface):
    """Interface for executing commands on a peer."""

    limitedarguments = interfaceutil.Attribute(
303 306 """True if the peer cannot receive large argument value for commands."""
    )

    def commandexecutor():
        """A context manager that resolves to an ipeercommandexecutor.

        The object this resolves to can be used to issue command requests
        to the peer.

        Callers should call its ``callcommand`` method to issue command
        requests.

        A new executor should be obtained for each distinct set of commands
        (possibly just a single command) that the consumer wants to execute
        as part of a single operation or round trip. This is because some
        peers are half-duplex and/or don't support persistent connections.
        e.g. in the case of HTTP peers, commands sent to an executor represent
        a single HTTP request. While some peers may support multiple command
        sends over the wire per executor, consumers need to code to the least
        capable peer. So it should be assumed that command executors buffer
        called commands until they are told to send them and that each
        command executor could result in a new connection or wire-level request
        being issued.
        """


class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
    """Unified interface for peer repositories.

    All peer instances must conform to this interface.
    """


class ipeerv2(ipeerconnection, ipeercapabilities, ipeerrequests):
    """Unified peer interface for wire protocol version 2 peers."""

    apidescriptor = interfaceutil.Attribute(
        """Data structure holding description of server API."""
    )


@interfaceutil.implementer(ipeerbase)
class peer(object):
    """Base class for peer repositories."""

    limitedarguments = False

    def capable(self, name):
        caps = self.capabilities()
        if name in caps:
            return True

        name = b'%s=' % name
        for cap in caps:
            if cap.startswith(name):
                return cap[len(name) :]

        return False

    def requirecap(self, name, purpose):
        if self.capable(name):
            return

        raise error.CapabilityError(
            _(
                b'cannot %s; remote repository does not support the '
                b'\'%s\' capability'
            )
            % (purpose, name)
        )

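# Hedged sketch (illustrative only): ``capable()`` returns False, True, or a
# bytes value for non-boolean capabilities, so callers branch on the return
# value. The capability name below is made up for illustration.
def _example_check_capability(somepeer):
    support = somepeer.capable(b'somecapability')
    if support is False:
        return None
    # ``support`` is True or the value after b'somecapability='.
    return support
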

class iverifyproblem(interfaceutil.Interface):
    """Represents a problem with the integrity of the repository.

    Instances of this interface are emitted to describe an integrity issue
    with a repository (e.g. corrupt storage, missing data, etc).

    Instances are essentially messages associated with severity.
    """

    warning = interfaceutil.Attribute(
        """Message indicating a non-fatal problem."""
    )

    error = interfaceutil.Attribute("""Message indicating a fatal problem.""")

    node = interfaceutil.Attribute(
        """Revision encountering the problem.

        ``None`` means the problem doesn't apply to a single revision.
        """
    )


class irevisiondelta(interfaceutil.Interface):
    """Represents a delta between one revision and another.

    Instances convey enough information to allow a revision to be exchanged
    with another repository.

    Instances represent the fulltext revision data or a delta against
    another revision. Therefore the ``revision`` and ``delta`` attributes
    are mutually exclusive.

    Typically used for changegroup generation.
    """

    node = interfaceutil.Attribute("""20 byte node of this revision.""")

    p1node = interfaceutil.Attribute(
        """20 byte node of 1st parent of this revision."""
    )

    p2node = interfaceutil.Attribute(
        """20 byte node of 2nd parent of this revision."""
    )

    linknode = interfaceutil.Attribute(
        """20 byte node of the changelog revision this node is linked to."""
    )

    flags = interfaceutil.Attribute(
        """2 bytes of integer flags that apply to this revision.

        This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
        """
    )

    basenode = interfaceutil.Attribute(
        """20 byte node of the revision this data is a delta against.

        ``nullid`` indicates that the revision is a full revision and not
        a delta.
        """
    )

    baserevisionsize = interfaceutil.Attribute(
        """Size of base revision this delta is against.

        May be ``None`` if ``basenode`` is ``nullid``.
        """
    )

    revision = interfaceutil.Attribute(
        """Raw fulltext of revision data for this node."""
    )

    delta = interfaceutil.Attribute(
        """Delta between ``basenode`` and ``node``.

        Stored in the bdiff delta format.
        """
    )

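# Hedged sketch (illustrative only): because ``revision`` and ``delta`` are
# mutually exclusive, a receiver resolves fulltext data roughly like this.
# ``basetext`` must be the fulltext of ``basenode``; that ``mdiff.patch`` is
# available as the bdiff patcher in ``mercurial.mdiff`` is an assumption here.
def _example_resolve_fulltext(rdelta, basetext):
    from mercurial import mdiff

    if rdelta.revision is not None:
        return rdelta.revision
    return mdiff.patch(basetext, rdelta.delta)
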

class ifilerevisionssequence(interfaceutil.Interface):
    """Contains index data for all revisions of a file.

    Types implementing this behave like lists of tuples. The index
    in the list corresponds to the revision number. The values contain
    index metadata.

    The *null* revision (revision number -1) is always the last item
    in the index.
    """

    def __len__():
        """The total number of revisions."""

    def __getitem__(rev):
        """Returns the object having a specific revision number.

        Returns an 8-tuple with the following fields:

        offset+flags
           Contains the offset and flags for the revision. 64-bit unsigned
           integer where the first 6 bytes are the offset and the next 2 bytes
           are flags. The offset can be 0 if it is not used by the store.
        compressed size
           Size of the revision data in the store. It can be 0 if it isn't
           needed by the store.
        uncompressed size
           Fulltext size. It can be 0 if it isn't needed by the store.
        base revision
           Revision number of revision the delta for storage is encoded
           against. -1 indicates not encoded against a base revision.
        link revision
           Revision number of changelog revision this entry is related to.
        p1 revision
           Revision number of 1st parent. -1 if no 1st parent.
        p2 revision
           Revision number of 2nd parent. -1 if no 2nd parent.
        node
           Binary node value for this revision number.

        Negative values should index off the end of the sequence. ``-1``
        should return the null revision. ``-2`` should return the most
        recent revision.
        """

    def __contains__(rev):
        """Whether a revision number exists."""

    def insert(self, i, entry):
        """Add an item to the index at specific revision."""

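# Hedged sketch (illustrative only): unpacking the 8-tuple documented in
# ``ifilerevisionssequence.__getitem__``. The 16-bit shift/mask mirrors the
# "6 bytes offset, 2 bytes flags" packing described above; ``index`` is
# assumed to conform to the interface.
def _example_read_index_entry(index, rev):
    entry = index[rev]
    offset_flags, compsize, rawsize, baserev, linkrev, p1rev, p2rev, node = entry
    offset = offset_flags >> 16
    flags = offset_flags & 0xFFFF
    return offset, flags, node
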

class ifileindex(interfaceutil.Interface):
    """Storage interface for index data of a single file.

    File storage data is divided into index metadata and data storage.
    This interface defines the index portion of the interface.

    The index logically consists of:

    * A mapping between revision numbers and nodes.
    * DAG data (storing and querying the relationship between nodes).
    * Metadata to facilitate storage.
    """

    def __len__():
        """Obtain the number of revisions stored for this file."""

    def __iter__():
        """Iterate over revision numbers for this file."""

    def hasnode(node):
        """Returns a bool indicating if a node is known to this store.

        Implementations must only return True for full, binary node values:
        hex nodes, revision numbers, and partial node matches must be
        rejected.

        The null node is never present.
        """

    def revs(start=0, stop=None):
        """Iterate over revision numbers for this file, with control."""

    def parents(node):
        """Returns a 2-tuple of parent nodes for a revision.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def rev(node):
        """Obtain the revision number given a node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``IndexError`` if the node is not known.
        """

    def lookup(node):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a string
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.
        """

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def iscensored(rev):
        """Return whether a revision's content has been censored."""

    def commonancestorsheads(node1, node2):
        """Obtain an iterable of nodes containing heads of common ancestors.

        See ``ancestor.commonancestorsheads()``.
        """

    def descendants(revs):
        """Obtain descendant revision numbers for a set of revision numbers.

        If ``nullrev`` is in the set, this is equivalent to ``revs()``.
        """

    def heads(start=None, stop=None):
        """Obtain a list of nodes that are DAG heads, with control.

        The set of revisions examined can be limited by specifying
        ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
        iterable of nodes. DAG traversal starts at earlier revision
        ``start`` and iterates forward until any node in ``stop`` is
        encountered.
        """

    def children(node):
        """Obtain nodes that are children of a node.

        Returns a list of nodes.
        """


class ifiledata(interfaceutil.Interface):
    """Storage interface for data storage of a specific file.

    This complements ``ifileindex`` and provides an interface for accessing
    data for a tracked file.
    """

    def size(rev):
        """Obtain the fulltext size of file data.

        Any metadata is excluded from size measurements.
        """

    def revision(node, raw=False):
622 625 """"Obtain fulltext data for a node.

        By default, any storage transformations are applied before the data
        is returned. If ``raw`` is True, non-raw storage transformations
        are not applied.

        The fulltext data may contain a header containing metadata. Most
        consumers should use ``read()`` to obtain the actual file data.
        """

    def rawdata(node):
        """Obtain raw data for a node."""

    def read(node):
        """Resolve file fulltext data.

        This is similar to ``revision()`` except any metadata in the data
        headers is stripped.
        """

    def renamed(node):
        """Obtain copy metadata for a node.

        Returns ``False`` if no copy metadata is stored or a 2-tuple of
        (path, node) from which this revision was copied.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.

        This takes copy metadata into account.

        TODO better document the copy metadata and censoring logic.
        """

    def emitrevisions(
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltamode=CG_DELTAMODE_STD,
    ):
        """Produce ``irevisiondelta`` for revisions.

        Given an iterable of nodes, emits objects conforming to the
        ``irevisiondelta`` interface that describe revisions in storage.

        This method is a generator.

        The input nodes may be unordered. Implementations must ensure that a
        node's parents are emitted before the node itself. Transitively, this
        means that a node may only be emitted once all its ancestors in
        ``nodes`` have also been emitted.

        By default, emits "index" data (the ``node``, ``p1node``, and
        ``p2node`` attributes). If ``revisiondata`` is set, revision data
        will also be present on the emitted objects.

        With default argument values, implementations can choose to emit
        either fulltext revision data or a delta. When emitting deltas,
        implementations must consider whether the delta's base revision
        fulltext is available to the receiver.

        The base revision fulltext is guaranteed to be available if any of
        the following are met:

        * Its fulltext revision was emitted by this method call.
        * A delta for that revision was emitted by this method call.
        * ``assumehaveparentrevisions`` is True and the base revision is a
          parent of the node.

        ``nodesorder`` can be used to control the order that revisions are
        emitted. By default, revisions can be reordered as long as they are
        in DAG topological order (see above). If the value is ``nodes``,
        the iteration order from ``nodes`` should be used. If the value is
        ``storage``, then the native order from the backing storage layer
        is used. (Not all storage layers will have strong ordering and behavior
        of this mode is storage-dependent.) ``nodes`` ordering can force
        revisions to be emitted before their ancestors, so consumers should
        use it with care.

        The ``linknode`` attribute on the returned ``irevisiondelta`` may not
        be set and it is the caller's responsibility to resolve it, if needed.

        If ``deltamode`` is CG_DELTAMODE_PREV and revision data is requested,
        all revision data should be emitted as deltas against the revision
        emitted just prior. The initial revision should be a delta against its
        1st parent.
        """

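# Hedged sketch (illustrative only): a single-pass consumer of
# ``emitrevisions()``. Because parents are emitted before children, every
# delta base emitted by this call has already been collected. ``store`` is
# assumed to conform to ``ifiledata``.
def _example_collect_revisions(store, nodes):
    collected = {}
    for rdelta in store.emitrevisions(nodes, revisiondata=True):
        collected[rdelta.node] = (rdelta.basenode, rdelta.revision, rdelta.delta)
    return collected
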

class ifilemutation(interfaceutil.Interface):
    """Storage interface for mutation events of a tracked file."""

    def add(filedata, meta, transaction, linkrev, p1, p2):
        """Add a new revision to the store.

        Takes file data, dictionary of metadata, a transaction, linkrev,
        and parent nodes.

        Returns the node that was added.

        May no-op if a revision matching the supplied data is already stored.
        """

    def addrevision(
        revisiondata,
        transaction,
        linkrev,
        p1,
        p2,
        node=None,
        flags=0,
        cachedelta=None,
    ):
        """Add a new revision to the store.

        This is similar to ``add()`` except it operates at a lower level.

        The data passed in already contains a metadata header, if any.

        ``node`` and ``flags`` can be used to define the expected node and
        the flags to use with storage. ``flags`` is a bitwise value composed
        of the various ``REVISION_FLAG_*`` constants.

        ``add()`` is usually called when adding files from e.g. the working
        directory. ``addrevision()`` is often called by ``add()`` and for
        scenarios where revision data has already been computed, such as when
        applying raw data from a peer repo.
        """

    def addgroup(
        deltas,
        linkmapper,
        transaction,
        addrevisioncb=None,
        maybemissingparents=False,
    ):
        """Process a series of deltas for storage.

        ``deltas`` is an iterable of 7-tuples of
        (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
        to add.

        The ``delta`` field contains ``mpatch`` data to apply to a base
        revision, identified by ``deltabase``. The base node can be
        ``nullid``, in which case the header from the delta can be ignored
        and the delta used as the fulltext.

        ``addrevisioncb`` should be called for each node as it is committed.

        ``maybemissingparents`` is a bool indicating whether the incoming
        data may reference parents/ancestor revisions that aren't present.
        This flag is set when receiving data into a "shallow" store that
        doesn't hold all history.

        Returns a list of nodes that were processed. A node will be in the list
        even if it existed in the store previously.
        """

    def censorrevision(tr, node, tombstone=b''):
        """Remove the content of a single revision.

        The specified ``node`` will have its content purged from storage.
        Future attempts to access the revision data for this node will
        result in failure.

        A ``tombstone`` message can optionally be stored. This message may be
        displayed to users when they attempt to access the missing revision
        data.

        Storage backends may have stored deltas against the previous content
        in this revision. As part of censoring a revision, these storage
        backends are expected to rewrite any internally stored deltas such
        that they no longer reference the deleted content.
        """

    def getstrippoint(minlink):
        """Find the minimum revision that must be stripped to strip a linkrev.

        Returns a 2-tuple containing the minimum revision number and a set
        of all revisions numbers that would be broken by this strip.

        TODO this is highly revlog centric and should be abstracted into
        a higher-level deletion API. ``repair.strip()`` relies on this.
        """

    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        This uses ``getstrippoint()`` to determine the first node to remove.
        Then it effectively truncates storage for all revisions after that.

        TODO this is highly revlog centric and should be abstracted into a
        higher-level deletion API.
        """

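# Hedged sketch (illustrative only): the shape of one entry in the ``deltas``
# iterable accepted by ``ifilemutation.addgroup()``. Every value below is a
# placeholder supplied by the caller; per the docstring, passing ``nullid``
# as ``deltabase`` means ``delta`` is treated as a fulltext.
def _example_delta_entry(node, p1, p2, linknode, deltabase, delta, flags=0):
    return (node, p1, p2, linknode, deltabase, delta, flags)
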

class ifilestorage(ifileindex, ifiledata, ifilemutation):
    """Complete storage interface for a single tracked file."""

    def files():
        """Obtain paths that are backing storage for this file.

        TODO this is used heavily by verify code and there should probably
        be a better API for that.
        """

    def storageinfo(
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        """Obtain information about storage for this file's data.

        Returns a dict describing storage for this tracked path. The keys
        in the dict map to arguments of the same name. The arguments are bools
        indicating whether to calculate and obtain that data.

        exclusivefiles
           Iterable of (vfs, path) describing files that are exclusively
           used to back storage for this tracked path.

        sharedfiles
           Iterable of (vfs, path) describing files that are used to back
           storage for this tracked path. Those files may also provide storage
           for other stored entities.

        revisionscount
           Number of revisions available for retrieval.

        trackedsize
           Total size in bytes of all tracked revisions. This is a sum of the
           length of the fulltext of all revisions.

        storedsize
           Total size in bytes used to store data for all tracked revisions.
           This is commonly less than ``trackedsize`` due to internal usage
           of deltas rather than fulltext revisions.

        Not all storage backends may support all queries or have a reasonable
        value to use. In that case, the value should be set to ``None`` and
        callers are expected to handle this special value.
        """

    def verifyintegrity(state):
        """Verifies the integrity of file storage.

        ``state`` is a dict holding state of the verifier process. It can be
        used to communicate data between invocations of multiple storage
        primitives.

        If individual revisions cannot have their revision content resolved,
        the method is expected to set the ``skipread`` key to a set of nodes
        that encountered problems. If set, the method can also add the node(s)
        to ``safe_renamed`` in order to indicate nodes that may perform the
        rename checks with currently accessible data.

        The method yields objects conforming to the ``iverifyproblem``
        interface.
        """

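# Hedged sketch (illustrative only): callers of ``storageinfo()`` opt in to
# each datum and must tolerate ``None`` for unsupported queries. That the
# returned dict keys are byte strings mirroring the argument names is an
# assumption here. ``fl`` is assumed to conform to ``ifilestorage``.
def _example_delta_savings(fl):
    info = fl.storageinfo(trackedsize=True, storedsize=True)
    tracked = info.get(b'trackedsize')
    stored = info.get(b'storedsize')
    if tracked is None or stored is None:
        return None
    return tracked - stored
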

class idirs(interfaceutil.Interface):
    """Interface representing a collection of directories from paths.

    This interface is essentially a derived data structure representing
    directories from a collection of paths.
    """

    def addpath(path):
        """Add a path to the collection.

        All directories in the path will be added to the collection.
        """

    def delpath(path):
        """Remove a path from the collection.

        If the removal was the last path in a particular directory, the
        directory is removed from the collection.
        """

    def __iter__():
        """Iterate over the directories in this collection of paths."""

    def __contains__(path):
        """Whether a specific directory is in this collection."""


class imanifestdict(interfaceutil.Interface):
    """Interface representing a manifest data structure.

    A manifest is effectively a dict mapping paths to entries. Each entry
    consists of a binary node and extra flags affecting that entry.
    """

    def __getitem__(path):
        """Returns the binary node value for a path in the manifest.

        Raises ``KeyError`` if the path does not exist in the manifest.

        Equivalent to ``self.find(path)[0]``.
        """

    def find(path):
        """Returns the entry for a path in the manifest.

        Returns a 2-tuple of (node, flags).

        Raises ``KeyError`` if the path does not exist in the manifest.
        """

    def __len__():
        """Return the number of entries in the manifest."""

    def __nonzero__():
        """Returns True if the manifest has entries, False otherwise."""

    __bool__ = __nonzero__

    def __setitem__(path, node):
        """Define the node value for a path in the manifest.

        If the path is already in the manifest, its flags will be copied to
        the new entry.
        """

    def __contains__(path):
        """Whether a path exists in the manifest."""

    def __delitem__(path):
        """Remove a path from the manifest.

        Raises ``KeyError`` if the path is not in the manifest.
        """

    def __iter__():
        """Iterate over paths in the manifest."""

    def iterkeys():
        """Iterate over paths in the manifest."""

    def keys():
        """Obtain a list of paths in the manifest."""

    def filesnotin(other, match=None):
        """Obtain the set of paths in this manifest but not in another.

        ``match`` is an optional matcher function to be applied to both
        manifests.

        Returns a set of paths.
        """

    def dirs():
        """Returns an object implementing the ``idirs`` interface."""

    def hasdir(dir):
        """Returns a bool indicating if a directory is in this manifest."""

    def walk(match):
        """Generator of paths in manifest satisfying a matcher.

        If the matcher has explicit files listed and they don't exist in
        the manifest, ``match.bad()`` is called for each missing file.
        """

    def diff(other, match=None, clean=False):
        """Find differences between this manifest and another.

        This manifest is compared to ``other``.

        If ``match`` is provided, the two manifests are filtered against this
        matcher and only entries satisfying the matcher are compared.

        If ``clean`` is True, unchanged files are included in the returned
        object.

        Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
        the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
        represents the node and flags for this manifest and ``(node2, flag2)``
        are the same for the other manifest.
        """

    def setflag(path, flag):
        """Set the flag value for a given path.

        Raises ``KeyError`` if the path is not already in the manifest.
        """

    def get(path, default=None):
        """Obtain the node value for a path or a default value if missing."""

    def flags(path):
        """Return the flags value for a path (default: empty bytestring)."""

    def copy():
        """Return a copy of this manifest."""

    def items():
        """Returns an iterable of (path, node) for items in this manifest."""

    def iteritems():
        """Identical to items()."""

    def iterentries():
        """Returns an iterable of (path, node, flags) for this manifest.

        Similar to ``iteritems()`` except items are a 3-tuple and include
        flags.
        """

    def text():
        """Obtain the raw data representation for this manifest.

        Result is used to create a manifest revision.
        """

    def fastdelta(base, changes):
        """Obtain a delta between this manifest and another given changes.

        ``base`` is the raw data representation for another manifest.

        ``changes`` is an iterable of ``(path, to_delete)``.

        Returns a 2-tuple containing ``bytearray(self.text())`` and the
        delta between ``base`` and this manifest.

        If this manifest implementation can't support ``fastdelta()``,
        raise ``mercurial.manifest.FastdeltaUnavailable``.
        """

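# Hedged sketch (illustrative only): ``imanifestdict`` behaves like a dict of
# path -> node with per-path flags alongside, and ``diff()`` returns the
# 2-tuple-of-2-tuples mapping described above. ``m1`` and ``m2`` are assumed
# to conform to the interface.
def _example_manifest_diff(m1, m2):
    changed = []
    for path, ((n1, fl1), (n2, fl2)) in m1.diff(m2).items():
        changed.append((path, n1, n2))
    return changed
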

class imanifestrevisionbase(interfaceutil.Interface):
    """Base interface representing a single revision of a manifest.

    Should not be used as a primary interface: should always be inherited
    as part of a larger interface.
    """

    def copy():
        """Obtain a copy of this manifest instance.

        Returns an object conforming to the ``imanifestrevisionwritable``
        interface. The instance will be associated with the same
        ``imanifestlog`` collection as this instance.
        """

    def read():
        """Obtain the parsed manifest data structure.

        The returned object conforms to the ``imanifestdict`` interface.
        """


class imanifestrevisionstored(imanifestrevisionbase):
    """Interface representing a manifest revision committed to storage."""

    def node():
        """The binary node for this manifest."""

    parents = interfaceutil.Attribute(
        """List of binary nodes that are parents for this manifest revision."""
    )

    def readdelta(shallow=False):
        """Obtain the manifest data structure representing changes from parent.

        This manifest is compared to its 1st parent. A new manifest representing
        those differences is constructed.

        The returned object conforms to the ``imanifestdict`` interface.
        """

    def readfast(shallow=False):
        """Calls either ``read()`` or ``readdelta()``.

        The faster of the two options is called.
        """

    def find(key):
1109 1112 """Calls self.read().find(key)``.

        Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
        """


class imanifestrevisionwritable(imanifestrevisionbase):
    """Interface representing a manifest revision that can be committed."""

    def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
        """Add this revision to storage.

        Takes a transaction object, the changeset revision number it will
        be associated with, its parent nodes, and lists of added and
        removed paths.

        If match is provided, storage can choose not to inspect or write out
        items that do not match. Storage is still required to be able to provide
        the full manifest in the future for any directories written (these
        manifests should not be "narrowed on disk").

        Returns the binary node of the created revision.
        """


class imanifeststorage(interfaceutil.Interface):
    """Storage interface for manifest data."""

    tree = interfaceutil.Attribute(
        """The path to the directory this manifest tracks.

        The empty bytestring represents the root manifest.
        """
    )

    index = interfaceutil.Attribute(
        """An ``ifilerevisionssequence`` instance."""
    )

    indexfile = interfaceutil.Attribute(
        """Path of revlog index file.

        TODO this is revlog specific and should not be exposed.
        """
    )

    opener = interfaceutil.Attribute(
        """VFS opener to use to access underlying files used for storage.

        TODO this is revlog specific and should not be exposed.
        """
    )

    version = interfaceutil.Attribute(
        """Revlog version number.

        TODO this is revlog specific and should not be exposed.
        """
    )

    _generaldelta = interfaceutil.Attribute(
        """Whether generaldelta storage is being used.

        TODO this is revlog specific and should not be exposed.
        """
    )

    fulltextcache = interfaceutil.Attribute(
        """Dict with cache of fulltexts.

        TODO this doesn't feel appropriate for the storage interface.
        """
    )

    def __len__():
        """Obtain the number of revisions stored for this manifest."""

    def __iter__():
        """Iterate over revision numbers for this manifest."""

    def rev(node):
        """Obtain the revision number given a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``error.LookupError`` if the revision is not known.
        """

    def lookup(value):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a bytes
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.
        """

    def parents(node):
        """Returns a 2-tuple of parent nodes for a node.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def revision(node, _df=None, raw=False):
        """Obtain fulltext data for a node."""

    def rawdata(node, _df=None):
        """Obtain raw data for a node."""

    def revdiff(rev1, rev2):
        """Obtain a delta between two revision numbers.

        The returned data is the result of ``bdiff.bdiff()`` on the raw
        revision data.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.
        """

    def emitrevisions(
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
    ):
        """Produce ``irevisiondelta`` describing revisions.

        See the documentation for ``ifiledata`` for more.
        """

    def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
        """Process a series of deltas for storage.

        See the documentation in ``ifilemutation`` for more.
        """

    def rawsize(rev):
        """Obtain the size of tracked data.

        Is equivalent to ``len(m.rawdata(node))``.

        TODO this method is only used by upgrade code and may be removed.
        """

    def getstrippoint(minlink):
        """Find minimum revision that must be stripped to strip a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    def checksize():
        """Obtain the expected sizes of backing files.

        TODO this is used by verify and it should not be part of the interface.
        """

    def files():
        """Obtain paths that are backing storage for this manifest.

        TODO this is used by verify and there should probably be a better API
        for this functionality.
        """

    def deltaparent(rev):
        """Obtain the revision that a revision is delta'd against.

        TODO delta encoding is an implementation detail of storage and should
        not be exposed to the storage interface.
        """

    def clone(tr, dest, **kwargs):
        """Clone this instance to another."""

    def clearcaches(clear_persisted_data=False):
        """Clear any caches associated with this instance."""

    def dirlog(d):
        """Obtain a manifest storage instance for a tree."""

    def add(
        m, transaction, link, p1, p2, added, removed, readtree=None, match=None
    ):
        """Add a revision to storage.

        ``m`` is an object conforming to ``imanifestdict``.

        ``link`` is the linkrev revision number.

        ``p1`` and ``p2`` are the parent revision numbers.

        ``added`` and ``removed`` are iterables of added and removed paths,
        respectively.

        ``readtree`` is a function that can be used to read the child tree(s)
        when recursively writing the full tree structure when using
        treemanifests.

        ``match`` is a matcher that can be used to hint to storage that not all
        paths must be inspected; this is an optimization and can be safely
        ignored. Note that the storage must still be able to reproduce a full
        manifest including files that did not match.
        """

    def storageinfo(
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        """Obtain information about storage for this manifest's data.

        See ``ifilestorage.storageinfo()`` for a description of this method.
        This one behaves the same way, except for manifest data.
        """


class imanifestlog(interfaceutil.Interface):
    """Interface representing a collection of manifest snapshots.

    Represents the root manifest in a repository.

    Also serves as a means to access nested tree manifests and to cache
    tree manifests.
    """

    def __getitem__(node):
        """Obtain a manifest instance for a given binary node.

        Equivalent to calling ``self.get('', node)``.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    def get(tree, node, verify=True):
        """Retrieve the manifest instance for a given directory and binary node.

        ``node`` always refers to the node of the root manifest (which will be
        the only manifest if flat manifests are being used).

        If ``tree`` is the empty string, the root manifest is returned.
        Otherwise the manifest for the specified directory will be returned
        (requires tree manifests).

        If ``verify`` is True, ``LookupError`` is raised if the node is not
        known.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    def getstorage(tree):
        """Retrieve an interface to storage for a particular tree.

        If ``tree`` is the empty bytestring, storage for the root manifest will
        be returned. Otherwise storage for a tree manifest is returned.

        TODO formalize interface for returned object.
        """

    def clearcaches():
        """Clear caches associated with this collection."""

    def rev(node):
        """Obtain the revision number for a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def update_caches(transaction):
1399 1402 """update whatever cache are relevant for the used storage."""
1400 1403
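# Hedged sketch (illustrative only): reading a directory manifest through
# ``imanifestlog.get()``. An empty ``tree`` addresses the root manifest;
# non-empty trees require tree manifests, per the docstring above.
def _example_read_tree(manifestlog, node, tree=b''):
    mctx = manifestlog.get(tree, node)
    return mctx.read()  # conforms to ``imanifestdict``
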

class ilocalrepositoryfilestorage(interfaceutil.Interface):
    """Local repository sub-interface providing access to tracked file storage.

    This interface defines how a repository accesses storage for a single
    tracked file path.
    """

    def file(f):
        """Obtain a filelog for a tracked path.

        The returned type conforms to the ``ifilestorage`` interface.
        """


class ilocalrepositorymain(interfaceutil.Interface):
    """Main interface for local repositories.

    This currently captures the reality of things - not how things should be.
    """

    supportedformats = interfaceutil.Attribute(
        """Set of requirements that apply to stream clone.

        This is actually a class attribute and is shared among all instances.
        """
    )

    supported = interfaceutil.Attribute(
        """Set of requirements that this repo is capable of opening."""
    )

    requirements = interfaceutil.Attribute(
        """Set of requirements this repo uses."""
    )

    features = interfaceutil.Attribute(
        """Set of "features" this repository supports.

        A "feature" is a loosely-defined term. It can refer to a feature
        in the classical sense or can describe an implementation detail
        of the repository. For example, a ``readonly`` feature may denote
        the repository as read-only. Or a ``revlogfilestore`` feature may
        denote that the repository is using revlogs for file storage.

        The intent of features is to provide a machine-queryable mechanism
        for repo consumers to test for various repository characteristics.

        Features are similar to ``requirements``. The main difference is that
        requirements are stored on-disk and represent requirements to open the
        repository. Features are more run-time capabilities of the repository
        and more granular capabilities (which may be derived from requirements).
        """
    )

    filtername = interfaceutil.Attribute(
        """Name of the repoview that is active on this repo."""
    )

    wvfs = interfaceutil.Attribute(
        """VFS used to access the working directory."""
    )

    vfs = interfaceutil.Attribute(
        """VFS rooted at the .hg directory.

        Used to access repository data not in the store.
        """
    )

    svfs = interfaceutil.Attribute(
        """VFS rooted at the store.

        Used to access repository data in the store. Typically .hg/store.
        But can point elsewhere if the store is shared.
        """
    )

    root = interfaceutil.Attribute(
        """Path to the root of the working directory."""
    )

    path = interfaceutil.Attribute("""Path to the .hg directory.""")

    origroot = interfaceutil.Attribute(
        """The filesystem path that was used to construct the repo."""
    )

    auditor = interfaceutil.Attribute(
        """A pathauditor for the working directory.

        This checks if a path refers to a nested repository.

        Operates on the filesystem.
        """
    )

    nofsauditor = interfaceutil.Attribute(
        """A pathauditor for the working directory.

        This is like ``auditor`` except it doesn't do filesystem checks.
        """
    )

    baseui = interfaceutil.Attribute(
        """Original ui instance passed into constructor."""
    )

    ui = interfaceutil.Attribute("""Main ui instance for this instance.""")

    sharedpath = interfaceutil.Attribute(
        """Path to the .hg directory of the repo this repo was shared from."""
    )

    store = interfaceutil.Attribute("""A store instance.""")

    spath = interfaceutil.Attribute("""Path to the store.""")

    sjoin = interfaceutil.Attribute("""Alias to self.store.join.""")

    cachevfs = interfaceutil.Attribute(
        """A VFS used to access the cache directory.

        Typically .hg/cache.
        """
    )

    wcachevfs = interfaceutil.Attribute(
1529 1532 """A VFS used to access the cache directory dedicated to working copy
1530 1533
1531 1534 Typically .hg/wcache.
1532 1535 """
1533 1536 )
1534 1537
1535 1538 filteredrevcache = interfaceutil.Attribute(
1536 1539 """Holds sets of revisions to be filtered."""
1537 1540 )
1538 1541
1539 1542 names = interfaceutil.Attribute("""A ``namespaces`` instance.""")
1540 1543
1541 1544 filecopiesmode = interfaceutil.Attribute(
1542 1545 """The way files copies should be dealt with in this repo."""
1543 1546 )
1544 1547
1545 1548 def close():
1546 1549 """Close the handle on this repository."""
1547 1550
1548 1551 def peer():
1549 1552 """Obtain an object conforming to the ``peer`` interface."""
1550 1553
1551 1554 def unfiltered():
1552 1555 """Obtain an unfiltered/raw view of this repo."""
1553 1556
1554 1557 def filtered(name, visibilityexceptions=None):
1555 1558 """Obtain a named view of this repository."""
1556 1559
1557 1560 obsstore = interfaceutil.Attribute("""A store of obsolescence data.""")
1558 1561
1559 1562 changelog = interfaceutil.Attribute("""A handle on the changelog revlog.""")
1560 1563
1561 1564 manifestlog = interfaceutil.Attribute(
1562 1565 """An instance conforming to the ``imanifestlog`` interface.
1563 1566
1564 1567 Provides access to manifests for the repository.
1565 1568 """
1566 1569 )
1567 1570
1568 1571 dirstate = interfaceutil.Attribute("""Working directory state.""")
1569 1572
1570 1573 narrowpats = interfaceutil.Attribute(
1571 1574 """Matcher patterns for this repository's narrowspec."""
1572 1575 )
1573 1576
1574 1577 def narrowmatch(match=None, includeexact=False):
1575 1578 """Obtain a matcher for the narrowspec."""
1576 1579
1577 1580 def setnarrowpats(newincludes, newexcludes):
1578 1581 """Define the narrowspec for this repository."""
1579 1582
1580 1583 def __getitem__(changeid):
1581 1584 """Try to resolve a changectx."""
1582 1585
1583 1586 def __contains__(changeid):
1584 1587 """Whether a changeset exists."""
1585 1588
1586 1589 def __nonzero__():
1587 1590 """Always returns True."""
1588 1591 return True
1589 1592
1590 1593 __bool__ = __nonzero__
1591 1594
1592 1595 def __len__():
1593 1596 """Returns the number of changesets in the repo."""
1594 1597
1595 1598 def __iter__():
1596 1599 """Iterate over revisions in the changelog."""
1597 1600
1598 1601 def revs(expr, *args):
1599 1602 """Evaluate a revset.
1600 1603
1601 1604 Emits revisions.
1602 1605 """
1603 1606
1604 1607 def set(expr, *args):
1605 1608 """Evaluate a revset.
1606 1609
1607 1610 Emits changectx instances.
1608 1611 """
1609 1612
1610 1613 def anyrevs(specs, user=False, localalias=None):
1611 1614 """Find revisions matching one of the given revsets."""
1612 1615
1613 1616 def url():
1614 1617 """Returns a string representing the location of this repo."""
1615 1618
1616 1619 def hook(name, throw=False, **args):
1617 1620 """Call a hook."""
1618 1621
1619 1622 def tags():
1620 1623 """Return a mapping of tag to node."""
1621 1624
1622 1625 def tagtype(tagname):
1623 1626 """Return the type of a given tag."""
1624 1627
1625 1628 def tagslist():
1626 1629 """Return a list of tags ordered by revision."""
1627 1630
1628 1631 def nodetags(node):
1629 1632 """Return the tags associated with a node."""
1630 1633
1631 1634 def nodebookmarks(node):
1632 1635 """Return the list of bookmarks pointing to the specified node."""
1633 1636
1634 1637 def branchmap():
1635 1638 """Return a mapping of branch to heads in that branch."""
1636 1639
1637 1640 def revbranchcache():
1638 1641 pass
1639 1642
1640 1643 def branchtip(branch, ignoremissing=False):
1641 1644 """Return the tip node for a given branch."""
1642 1645
1643 1646 def lookup(key):
1644 1647 """Resolve the node for a revision."""
1645 1648
1646 1649 def lookupbranch(key):
1647 1650 """Look up the branch name of the given revision or branch name."""
1648 1651
1649 1652 def known(nodes):
1650 1653 """Determine whether a series of nodes is known.
1651 1654
1652 1655 Returns a list of bools.
1653 1656 """
1654 1657
1655 1658 def local():
1656 1659 """Whether the repository is local."""
1657 1660 return True
1658 1661
1659 1662 def publishing():
1660 1663 """Whether the repository is a publishing repository."""
1661 1664
1662 1665 def cancopy():
1663 1666 pass
1664 1667
1665 1668 def shared():
1666 1669 """The type of shared repository or None."""
1667 1670
1668 1671 def wjoin(f, *insidef):
1669 1672 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1670 1673
1671 1674 def setparents(p1, p2):
1672 1675 """Set the parent nodes of the working directory."""
1673 1676
1674 1677 def filectx(path, changeid=None, fileid=None):
1675 1678 """Obtain a filectx for the given file revision."""
1676 1679
1677 1680 def getcwd():
1678 1681 """Obtain the current working directory from the dirstate."""
1679 1682
1680 1683 def pathto(f, cwd=None):
1681 1684 """Obtain the relative path to a file."""
1682 1685
1683 1686 def adddatafilter(name, fltr):
1684 1687 pass
1685 1688
1686 1689 def wread(filename):
1687 1690 """Read a file from wvfs, using data filters."""
1688 1691
1689 1692 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1690 1693 """Write data to a file in the wvfs, using data filters."""
1691 1694
1692 1695 def wwritedata(filename, data):
1693 1696 """Resolve data for writing to the wvfs, using data filters."""
1694 1697
1695 1698 def currenttransaction():
1696 1699 """Obtain the current transaction instance or None."""
1697 1700
1698 1701 def transaction(desc, report=None):
1699 1702 """Open a new transaction to write to the repository."""
1700 1703
1701 1704 def undofiles():
1702 1705 """Returns a list of (vfs, path) for files to undo transactions."""
1703 1706
1704 1707 def recover():
1705 1708 """Roll back an interrupted transaction."""
1706 1709
1707 1710 def rollback(dryrun=False, force=False):
1708 1711 """Undo the last transaction.
1709 1712
1710 1713 DANGEROUS.
1711 1714 """
1712 1715
1713 1716 def updatecaches(tr=None, full=False):
1714 1717 """Warm repo caches."""
1715 1718
1716 1719 def invalidatecaches():
1717 1720 """Invalidate cached data due to the repository mutating."""
1718 1721
1719 1722 def invalidatevolatilesets():
1720 1723 pass
1721 1724
1722 1725 def invalidatedirstate():
1723 1726 """Invalidate the dirstate."""
1724 1727
1725 1728 def invalidate(clearfilecache=False):
1726 1729 pass
1727 1730
1728 1731 def invalidateall():
1729 1732 pass
1730 1733
1731 1734 def lock(wait=True):
1732 1735 """Lock the repository store and return a lock instance."""
1733 1736
1734 1737 def wlock(wait=True):
1735 1738 """Lock the non-store parts of the repository."""
1736 1739
1737 1740 def currentwlock():
1738 1741 """Return the wlock if it's held or None."""
1739 1742
1740 1743 def checkcommitpatterns(wctx, match, status, fail):
1741 1744 pass
1742 1745
1743 1746 def commit(
1744 1747 text=b'',
1745 1748 user=None,
1746 1749 date=None,
1747 1750 match=None,
1748 1751 force=False,
1749 1752 editor=False,
1750 1753 extra=None,
1751 1754 ):
1752 1755 """Add a new revision to the repository."""
1753 1756
1754 1757 def commitctx(ctx, error=False, origctx=None):
1755 1758 """Commit a commitctx instance to the repository."""
1756 1759
1757 1760 def destroying():
1758 1761 """Inform the repository that nodes are about to be destroyed."""
1759 1762
1760 1763 def destroyed():
1761 1764 """Inform the repository that nodes have been destroyed."""
1762 1765
1763 1766 def status(
1764 1767 node1=b'.',
1765 1768 node2=None,
1766 1769 match=None,
1767 1770 ignored=False,
1768 1771 clean=False,
1769 1772 unknown=False,
1770 1773 listsubrepos=False,
1771 1774 ):
1772 1775 """Convenience method to call repo[x].status()."""
1773 1776
1774 1777 def addpostdsstatus(ps):
1775 1778 pass
1776 1779
1777 1780 def postdsstatus():
1778 1781 pass
1779 1782
1780 1783 def clearpostdsstatus():
1781 1784 pass
1782 1785
1783 1786 def heads(start=None):
1784 1787 """Obtain list of nodes that are DAG heads."""
1785 1788
1786 1789 def branchheads(branch=None, start=None, closed=False):
1787 1790 pass
1788 1791
1789 1792 def branches(nodes):
1790 1793 pass
1791 1794
1792 1795 def between(pairs):
1793 1796 pass
1794 1797
1795 1798 def checkpush(pushop):
1796 1799 pass
1797 1800
1798 1801 prepushoutgoinghooks = interfaceutil.Attribute("""util.hooks instance.""")
1799 1802
1800 1803 def pushkey(namespace, key, old, new):
1801 1804 pass
1802 1805
1803 1806 def listkeys(namespace):
1804 1807 pass
1805 1808
1806 1809 def debugwireargs(one, two, three=None, four=None, five=None):
1807 1810 pass
1808 1811
1809 1812 def savecommitmessage(text):
1810 1813 pass
1811 1814
1812 1815
1813 1816 class completelocalrepository(
1814 1817 ilocalrepositorymain, ilocalrepositoryfilestorage
1815 1818 ):
1816 1819 """Complete interface for a local repository."""
1817 1820
1818 1821
1819 1822 class iwireprotocolcommandcacher(interfaceutil.Interface):
1820 1823 """Represents a caching backend for wire protocol commands.
1821 1824
1822 1825 Wire protocol version 2 supports transparent caching of many commands.
1823 1826 To leverage this caching, servers can activate objects that cache
1824 1827 command responses. Objects handle both cache writing and reading.
1825 1828 This interface defines how that response caching mechanism works.
1826 1829
1827 1830 Wire protocol version 2 commands emit a series of objects that are
1828 1831 serialized and sent to the client. The caching layer exists between
1829 1832 the invocation of the command function and the sending of its output
1830 1833 objects to an output layer.
1831 1834
1832 1835 Instances of this interface represent a binding to a cache that
1833 1836 can serve a response (in place of calling a command function) and/or
1834 1837 write responses to a cache for subsequent use.
1835 1838
1836 1839 When a command request arrives, the following happens with regard
1837 1840 to this interface:
1838 1841
1839 1842 1. The server determines whether the command request is cacheable.
1840 1843 2. If it is, an instance of this interface is spawned.
1841 1844 3. The cacher is activated in a context manager (``__enter__`` is called).
1842 1845 4. A cache *key* for that request is derived. This will call the
1843 1846 instance's ``adjustcachekeystate()`` method so the derivation
1844 1847 can be influenced.
1845 1848 5. The cacher is informed of the derived cache key via a call to
1846 1849 ``setcachekey()``.
1847 1850 6. The cacher's ``lookup()`` method is called to test for presence of
1848 1851 the derived key in the cache.
1849 1852 7. If ``lookup()`` returns a hit, that cached result is used in place
1850 1853 of invoking the command function. ``__exit__`` is called and the instance
1851 1854 is discarded.
1852 1855 8. The command function is invoked.
1853 1856 9. ``onobject()`` is called for each object emitted by the command
1854 1857 function.
1855 1858 10. After the final object is seen, ``onfinished()`` is called.
1856 1859 11. ``__exit__`` is called to signal the end of use of the instance.
1857 1860
1858 1861 Cache *key* derivation can be influenced by the instance.
1859 1862
1860 1863 Cache keys are initially derived by a deterministic representation of
1861 1864 the command request. This includes the command name, arguments, protocol
1862 1865 version, etc. This initial key derivation is performed by CBOR-encoding a
1863 1866 data structure and feeding that output into a hasher.
1864 1867
1865 1868 Instances of this interface can influence this initial key derivation
1866 1869 via ``adjustcachekeystate()``.
1867 1870
1868 1871 The instance is informed of the derived cache key via a call to
1869 1872 ``setcachekey()``. The instance must store the key locally so it can
1870 1873 be consulted on subsequent operations that may require it.
1871 1874
1872 1875 When constructed, the instance has access to a callable that can be used
1873 1876 for encoding response objects. This callable receives as its single
1874 1877 argument an object emitted by a command function. It returns an iterable
1875 1878 of bytes chunks representing the encoded object. Unless the cacher is
1876 1879 caching native Python objects in memory or has a way of reconstructing
1877 1880 the original Python objects, implementations typically call this function
1878 1881 to produce bytes from the output objects and then store those bytes in
1879 1882 the cache. When it comes time to re-emit those bytes, they are wrapped
1880 1883 in a ``wireprototypes.encodedresponse`` instance to tell the output
1881 1884 layer that they are pre-encoded.
1882 1885
1883 1886 When receiving the objects emitted by the command function, instances
1884 1887 can choose what to do with those objects. The simplest thing to do is
1885 1888 re-emit the original objects. They will be forwarded to the output
1886 1889 layer and will be processed as if the cacher did not exist.
1887 1890
1888 1891 Implementations could also choose to not emit objects - instead locally
1889 1892 buffering objects or their encoded representation. They could then emit
1890 1893 a single "coalesced" object when ``onfinished()`` is called. In
1891 1894 this way, the implementation would function as a filtering layer of
1892 1895 sorts.
1893 1896
1894 1897 When caching objects, typically the encoded form of the object will
1895 1898 be stored. Keep in mind that if the original object is forwarded to
1896 1899 the output layer, it will need to be encoded there as well. For large
1897 1900 output, this redundant encoding could add overhead. Implementations
1898 1901 could wrap the encoded object data in ``wireprototypes.encodedresponse``
1899 1902 instances to avoid this overhead.
1900 1903 """
1901 1904
1902 1905 def __enter__():
1903 1906 """Marks the instance as active.
1904 1907
1905 1908 Should return self.
1906 1909 """
1907 1910
1908 1911 def __exit__(exctype, excvalue, exctb):
1909 1912 """Called when cacher is no longer used.
1910 1913
1911 1914 This can be used by implementations to perform cleanup actions (e.g.
1912 1915 disconnecting network sockets, aborting a partially cached response).
1913 1916 """
1914 1917
1915 1918 def adjustcachekeystate(state):
1916 1919 """Influences cache key derivation by adjusting state to derive key.
1917 1920
1918 1921 A dict defining the state used to derive the cache key is passed.
1919 1922
1920 1923 Implementations can modify this dict to record additional state
1921 1924 they want to influence key derivation.
1922 1925
1923 1926 Implementations are *highly* encouraged to not modify or delete
1924 1927 existing keys.
1925 1928 """
1926 1929
1927 1930 def setcachekey(key):
1928 1931 """Record the derived cache key for this request.
1929 1932
1930 1933 Instances may mutate the key for internal usage, as desired. e.g.
1931 1934 instances may wish to prepend the repo name, introduce path
1932 1935 components for filesystem or URL addressing, etc. Behavior is up to
1933 1936 the cache.
1934 1937
1935 1938 Returns a bool indicating if the request is cacheable by this
1936 1939 instance.
1937 1940 """
1938 1941
1939 1942 def lookup():
1940 1943 """Attempt to resolve an entry in the cache.
1941 1944
1942 1945 The instance is instructed to look for the cache key that it was
1943 1946 informed about via the call to ``setcachekey()``.
1944 1947
1945 1948 If there's no cache hit or the cacher doesn't wish to use the cached
1946 1949 entry, ``None`` should be returned.
1947 1950
1948 1951 Else, a dict defining the cached result should be returned. The
1949 1952 dict may have the following keys:
1950 1953
1951 1954 objs
1952 1955 An iterable of objects that should be sent to the client. That
1953 1956 iterable of objects is expected to be what the command function
1954 1957 would return if invoked or an equivalent representation thereof.
1955 1958 """
1956 1959
1957 1960 def onobject(obj):
1958 1961 """Called when a new object is emitted from the command function.
1959 1962
1960 1963 Receives as its argument the object that was emitted from the
1961 1964 command function.
1962 1965
1963 1966 This method returns an iterator of objects to forward to the output
1964 1967 layer. The easiest implementation is a generator that just
1965 1968 ``yield obj``.
1966 1969 """
1967 1970
1968 1971 def onfinished():
1969 1972 """Called after all objects have been emitted from the command function.
1970 1973
1971 1974 Implementations should return an iterator of objects to forward to
1972 1975 the output layer.
1973 1976
1974 1977 This method can be a generator.
1975 1978 """
@@ -1,3530 +1,3530
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import random
13 13 import sys
14 14 import time
15 15 import weakref
16 16
17 17 from .i18n import _
18 18 from .node import (
19 19 bin,
20 20 hex,
21 21 nullid,
22 22 nullrev,
23 23 short,
24 24 )
25 25 from .pycompat import (
26 26 delattr,
27 27 getattr,
28 28 )
29 29 from . import (
30 30 bookmarks,
31 31 branchmap,
32 32 bundle2,
33 33 changegroup,
34 34 color,
35 35 commit,
36 36 context,
37 37 dirstate,
38 38 dirstateguard,
39 39 discovery,
40 40 encoding,
41 41 error,
42 42 exchange,
43 43 extensions,
44 44 filelog,
45 45 hook,
46 46 lock as lockmod,
47 47 match as matchmod,
48 48 mergestate as mergestatemod,
49 49 mergeutil,
50 50 namespaces,
51 51 narrowspec,
52 52 obsolete,
53 53 pathutil,
54 54 phases,
55 55 pushkey,
56 56 pycompat,
57 57 rcutil,
58 58 repoview,
59 59 revset,
60 60 revsetlang,
61 61 scmutil,
62 62 sparse,
63 63 store as storemod,
64 64 subrepoutil,
65 65 tags as tagsmod,
66 66 transaction,
67 67 txnutil,
68 68 util,
69 69 vfs as vfsmod,
70 70 )
71 71
72 72 from .interfaces import (
73 73 repository,
74 74 util as interfaceutil,
75 75 )
76 76
77 77 from .utils import (
78 78 hashutil,
79 79 procutil,
80 80 stringutil,
81 81 )
82 82
83 83 from .revlogutils import constants as revlogconst
84 84
85 85 release = lockmod.release
86 86 urlerr = util.urlerr
87 87 urlreq = util.urlreq
88 88
89 89 # set of (path, vfs-location) tuples. vfs-location is:
90 90 # - 'plain' for vfs relative paths
91 91 # - '' for svfs relative paths
92 92 _cachedfiles = set()
93 93
94 94
95 95 class _basefilecache(scmutil.filecache):
96 96 """All filecache usage on repo are done for logic that should be unfiltered
97 97 """
98 98
99 99 def __get__(self, repo, type=None):
100 100 if repo is None:
101 101 return self
102 102 # proxy to unfiltered __dict__ since filtered repo has no entry
103 103 unfi = repo.unfiltered()
104 104 try:
105 105 return unfi.__dict__[self.sname]
106 106 except KeyError:
107 107 pass
108 108 return super(_basefilecache, self).__get__(unfi, type)
109 109
110 110 def set(self, repo, value):
111 111 return super(_basefilecache, self).set(repo.unfiltered(), value)
112 112
113 113
114 114 class repofilecache(_basefilecache):
115 115 """filecache for files in .hg but outside of .hg/store"""
116 116
117 117 def __init__(self, *paths):
118 118 super(repofilecache, self).__init__(*paths)
119 119 for path in paths:
120 120 _cachedfiles.add((path, b'plain'))
121 121
122 122 def join(self, obj, fname):
123 123 return obj.vfs.join(fname)
124 124
125 125
126 126 class storecache(_basefilecache):
127 127 """filecache for files in the store"""
128 128
129 129 def __init__(self, *paths):
130 130 super(storecache, self).__init__(*paths)
131 131 for path in paths:
132 132 _cachedfiles.add((path, b''))
133 133
134 134 def join(self, obj, fname):
135 135 return obj.sjoin(fname)
136 136
137 137
138 138 class mixedrepostorecache(_basefilecache):
139 139 """filecache for a mix files in .hg/store and outside"""
140 140
141 141 def __init__(self, *pathsandlocations):
142 142 # scmutil.filecache only uses the path for passing back into our
143 143 # join(), so we can safely pass a list of paths and locations
144 144 super(mixedrepostorecache, self).__init__(*pathsandlocations)
145 145 _cachedfiles.update(pathsandlocations)
146 146
147 147 def join(self, obj, fnameandlocation):
148 148 fname, location = fnameandlocation
149 149 if location == b'plain':
150 150 return obj.vfs.join(fname)
151 151 else:
152 152 if location != b'':
153 153 raise error.ProgrammingError(
154 154 b'unexpected location: %s' % location
155 155 )
156 156 return obj.sjoin(fname)
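# Illustrative usage (a sketch with a hypothetical property, not part of
# this change): the filecache classes above are used as property
# decorators, e.g.
#
#     @repofilecache(b'bookmarks')
#     def _bookmarks(self):
#         return bookmarks.bmstore(self)
#
# The value is recomputed only when the backing file under .hg/
# (here ``bookmarks``) is observed to have changed on disk.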
157 157
158 158
159 159 def isfilecached(repo, name):
160 160 """check if a repo has already cached "name" filecache-ed property
161 161
162 162 This returns (cachedobj-or-None, iscached) tuple.
163 163 """
164 164 cacheentry = repo.unfiltered()._filecache.get(name, None)
165 165 if not cacheentry:
166 166 return None, False
167 167 return cacheentry.obj, True
168 168
169 169
170 170 class unfilteredpropertycache(util.propertycache):
171 171 """propertycache that apply to unfiltered repo only"""
172 172
173 173 def __get__(self, repo, type=None):
174 174 unfi = repo.unfiltered()
175 175 if unfi is repo:
176 176 return super(unfilteredpropertycache, self).__get__(unfi)
177 177 return getattr(unfi, self.name)
178 178
179 179
180 180 class filteredpropertycache(util.propertycache):
181 181 """propertycache that must take filtering in account"""
182 182
183 183 def cachevalue(self, obj, value):
184 184 object.__setattr__(obj, self.name, value)
185 185
186 186
187 187 def hasunfilteredcache(repo, name):
188 188 """check if a repo has an unfilteredpropertycache value for <name>"""
189 189 return name in vars(repo.unfiltered())
190 190
191 191
192 192 def unfilteredmethod(orig):
193 193 """decorate method that always need to be run on unfiltered version"""
194 194
195 195 def wrapper(repo, *args, **kwargs):
196 196 return orig(repo.unfiltered(), *args, **kwargs)
197 197
198 198 return wrapper
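# Illustrative usage (a hedged sketch): decorating a method guarantees it
# runs against the unfiltered repo even when invoked on a filtered view:
#
#     @unfilteredmethod
#     def destroyed(self):
#         ...  # must see all revisions, including filtered ones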
199 199
200 200
201 201 moderncaps = {
202 202 b'lookup',
203 203 b'branchmap',
204 204 b'pushkey',
205 205 b'known',
206 206 b'getbundle',
207 207 b'unbundle',
208 208 }
209 209 legacycaps = moderncaps.union({b'changegroupsubset'})
210 210
211 211
212 212 @interfaceutil.implementer(repository.ipeercommandexecutor)
213 213 class localcommandexecutor(object):
214 214 def __init__(self, peer):
215 215 self._peer = peer
216 216 self._sent = False
217 217 self._closed = False
218 218
219 219 def __enter__(self):
220 220 return self
221 221
222 222 def __exit__(self, exctype, excvalue, exctb):
223 223 self.close()
224 224
225 225 def callcommand(self, command, args):
226 226 if self._sent:
227 227 raise error.ProgrammingError(
228 228 b'callcommand() cannot be used after sendcommands()'
229 229 )
230 230
231 231 if self._closed:
232 232 raise error.ProgrammingError(
233 233 b'callcommand() cannot be used after close()'
234 234 )
235 235
236 236 # We don't need to support anything fancy. Just call the named
237 237 # method on the peer and return a resolved future.
238 238 fn = getattr(self._peer, pycompat.sysstr(command))
239 239
240 240 f = pycompat.futures.Future()
241 241
242 242 try:
243 243 result = fn(**pycompat.strkwargs(args))
244 244 except Exception:
245 245 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
246 246 else:
247 247 f.set_result(result)
248 248
249 249 return f
250 250
251 251 def sendcommands(self):
252 252 self._sent = True
253 253
254 254 def close(self):
255 255 self._closed = True
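# Illustrative use of the executor protocol (a sketch; ``peer`` is any
# object whose commandexecutor() returns an instance like the above):
#
#     with peer.commandexecutor() as e:
#         f = e.callcommand(b'heads', {})
#         e.sendcommands()
#     heads = f.result()
#
# For this local executor, results resolve immediately; wire peers may
# batch requests until sendcommands() is called.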
256 256
257 257
258 258 @interfaceutil.implementer(repository.ipeercommands)
259 259 class localpeer(repository.peer):
260 260 '''peer for a local repo; reflects only the most recent API'''
261 261
262 262 def __init__(self, repo, caps=None):
263 263 super(localpeer, self).__init__()
264 264
265 265 if caps is None:
266 266 caps = moderncaps.copy()
267 267 self._repo = repo.filtered(b'served')
268 268 self.ui = repo.ui
269 269 self._caps = repo._restrictcapabilities(caps)
270 270
271 271 # Begin of _basepeer interface.
272 272
273 273 def url(self):
274 274 return self._repo.url()
275 275
276 276 def local(self):
277 277 return self._repo
278 278
279 279 def peer(self):
280 280 return self
281 281
282 282 def canpush(self):
283 283 return True
284 284
285 285 def close(self):
286 286 self._repo.close()
287 287
288 288 # End of _basepeer interface.
289 289
290 290 # Begin of _basewirecommands interface.
291 291
292 292 def branchmap(self):
293 293 return self._repo.branchmap()
294 294
295 295 def capabilities(self):
296 296 return self._caps
297 297
298 298 def clonebundles(self):
299 299 return self._repo.tryread(b'clonebundles.manifest')
300 300
301 301 def debugwireargs(self, one, two, three=None, four=None, five=None):
302 302 """Used to test argument passing over the wire"""
303 303 return b"%s %s %s %s %s" % (
304 304 one,
305 305 two,
306 306 pycompat.bytestr(three),
307 307 pycompat.bytestr(four),
308 308 pycompat.bytestr(five),
309 309 )
310 310
311 311 def getbundle(
312 312 self, source, heads=None, common=None, bundlecaps=None, **kwargs
313 313 ):
314 314 chunks = exchange.getbundlechunks(
315 315 self._repo,
316 316 source,
317 317 heads=heads,
318 318 common=common,
319 319 bundlecaps=bundlecaps,
320 320 **kwargs
321 321 )[1]
322 322 cb = util.chunkbuffer(chunks)
323 323
324 324 if exchange.bundle2requested(bundlecaps):
325 325 # When requesting a bundle2, getbundle returns a stream to make the
326 326 # wire level function happier. We need to build a proper object
327 327 # from it in local peer.
328 328 return bundle2.getunbundler(self.ui, cb)
329 329 else:
330 330 return changegroup.getunbundler(b'01', cb, None)
331 331
332 332 def heads(self):
333 333 return self._repo.heads()
334 334
335 335 def known(self, nodes):
336 336 return self._repo.known(nodes)
337 337
338 338 def listkeys(self, namespace):
339 339 return self._repo.listkeys(namespace)
340 340
341 341 def lookup(self, key):
342 342 return self._repo.lookup(key)
343 343
344 344 def pushkey(self, namespace, key, old, new):
345 345 return self._repo.pushkey(namespace, key, old, new)
346 346
347 347 def stream_out(self):
348 348 raise error.Abort(_(b'cannot perform stream clone against local peer'))
349 349
350 350 def unbundle(self, bundle, heads, url):
351 351 """apply a bundle on a repo
352 352
353 353 This function handles the repo locking itself."""
354 354 try:
355 355 try:
356 356 bundle = exchange.readbundle(self.ui, bundle, None)
357 357 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
358 358 if util.safehasattr(ret, b'getchunks'):
359 359 # This is a bundle20 object, turn it into an unbundler.
360 360 # This little dance should be dropped eventually when the
361 361 # API is finally improved.
362 362 stream = util.chunkbuffer(ret.getchunks())
363 363 ret = bundle2.getunbundler(self.ui, stream)
364 364 return ret
365 365 except Exception as exc:
366 366 # If the exception contains output salvaged from a bundle2
367 367 # reply, we need to make sure it is printed before continuing
368 368 # to fail. So we build a bundle2 with such output and consume
369 369 # it directly.
370 370 #
371 371 # This is not very elegant but allows a "simple" solution for
372 372 # issue4594
373 373 output = getattr(exc, '_bundle2salvagedoutput', ())
374 374 if output:
375 375 bundler = bundle2.bundle20(self._repo.ui)
376 376 for out in output:
377 377 bundler.addpart(out)
378 378 stream = util.chunkbuffer(bundler.getchunks())
379 379 b = bundle2.getunbundler(self.ui, stream)
380 380 bundle2.processbundle(self._repo, b)
381 381 raise
382 382 except error.PushRaced as exc:
383 383 raise error.ResponseError(
384 384 _(b'push failed:'), stringutil.forcebytestr(exc)
385 385 )
386 386
387 387 # End of _basewirecommands interface.
388 388
389 389 # Begin of peer interface.
390 390
391 391 def commandexecutor(self):
392 392 return localcommandexecutor(self)
393 393
394 394 # End of peer interface.
395 395
396 396
397 397 @interfaceutil.implementer(repository.ipeerlegacycommands)
398 398 class locallegacypeer(localpeer):
399 399 '''peer extension which implements legacy methods too; used for tests with
400 400 restricted capabilities'''
401 401
402 402 def __init__(self, repo):
403 403 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
404 404
405 405 # Begin of baselegacywirecommands interface.
406 406
407 407 def between(self, pairs):
408 408 return self._repo.between(pairs)
409 409
410 410 def branches(self, nodes):
411 411 return self._repo.branches(nodes)
412 412
413 413 def changegroup(self, nodes, source):
414 414 outgoing = discovery.outgoing(
415 415 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
416 416 )
417 417 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
418 418
419 419 def changegroupsubset(self, bases, heads, source):
420 420 outgoing = discovery.outgoing(
421 421 self._repo, missingroots=bases, ancestorsof=heads
422 422 )
423 423 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
424 424
425 425 # End of baselegacywirecommands interface.
426 426
427 427
428 428 # Increment the sub-version when the revlog v2 format changes to lock out old
429 429 # clients.
430 430 REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
431 431
432 432 # A repository with the sparserevlog feature will have delta chains that
433 433 # can spread over a larger span. Sparse reading cuts these large spans into
434 434 # pieces, so that each piece isn't too big.
435 435 # Without the sparserevlog capability, reading from the repository could use
436 436 # huge amounts of memory, because the whole span would be read at once,
437 437 # including all the intermediate revisions that aren't pertinent for the chain.
438 438 # This is why once a repository has enabled sparse-read, it becomes required.
439 439 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
440 440
441 441 # A repository with the sidedataflag requirement allows storing extra
442 442 # information for revisions without altering their original hashes.
443 443 SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
444 444
445 445 # A repository with the copies-sidedata-changeset requirement will store
446 446 # copies related information in changeset's sidedata.
447 447 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
448 448
449 449 # The repository use persistent nodemap for the changelog and the manifest.
450 450 NODEMAP_REQUIREMENT = b'persistent-nodemap'
451 451
452 452 # Functions receiving (ui, features) that extensions can register to impact
453 453 # the ability to load repositories with custom requirements. Only
454 454 # functions defined in loaded extensions are called.
455 455 #
456 456 # The function receives a set of requirement strings that the repository
457 457 # is capable of opening. Functions will typically add elements to the
458 458 # set to reflect that the extension knows how to handle those requirements.
459 459 featuresetupfuncs = set()
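# Illustrative registration from an extension (hypothetical names):
#
#     def featuresetup(ui, features):
#         features.add(b'exp-myextension-requirement')
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)
#
# Only functions whose defining module is a loaded extension are called.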
460 460
461 461
462 462 def _getsharedvfs(hgvfs, requirements):
463 463 """ returns the vfs object pointing to root of shared source
464 464 repo for a shared repository
465 465
466 466 hgvfs is vfs pointing at .hg/ of current repo (shared one)
467 467 requirements is a set of requirements of current repo (shared one)
468 468 """
469 469 # The ``shared`` or ``relshared`` requirements indicate the
470 470 # store lives in the path contained in the ``.hg/sharedpath`` file.
471 471 # This is an absolute path for ``shared`` and relative to
472 472 # ``.hg/`` for ``relshared``.
473 473 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
474 474 if b'relshared' in requirements:
475 475 sharedpath = hgvfs.join(sharedpath)
476 476
477 477 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
478 478
479 479 if not sharedvfs.exists():
480 480 raise error.RepoError(
481 481 _(b'.hg/sharedpath points to nonexistent directory %s')
482 482 % sharedvfs.base
483 483 )
484 484 return sharedvfs
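# For example (illustrative paths): under the b'shared' requirement,
# .hg/sharedpath might contain b'/srv/repos/main/.hg' and is used as-is;
# under b'relshared' it might contain b'../main/.hg' and is first joined
# relative to this repository's .hg/ directory.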
485 485
486 486
487 487 def _readrequires(vfs, allowmissing):
488 488 """ reads the require file present at root of this vfs
489 489 and return a set of requirements
490 490
491 491 If allowmissing is True, we suppress ENOENT if raised"""
492 492 # requires file contains a newline-delimited list of
493 493 # features/capabilities the opener (us) must have in order to use
494 494 # the repository. This file was introduced in Mercurial 0.9.2,
495 495 # which means very old repositories may not have one. We assume
496 496 # a missing file translates to no requirements.
497 497 try:
498 498 requirements = set(vfs.read(b'requires').splitlines())
499 499 except IOError as e:
500 500 if not (allowmissing and e.errno == errno.ENOENT):
501 501 raise
502 502 requirements = set()
503 503 return requirements
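# For reference, .hg/requires is a plain newline-delimited list. A typical
# modern repository might contain (exact contents vary by format options):
#
#     dotencode
#     fncache
#     generaldelta
#     revlogv1
#     sparserevlog
#     store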
504 504
505 505
506 506 def makelocalrepository(baseui, path, intents=None):
507 507 """Create a local repository object.
508 508
509 509 Given arguments needed to construct a local repository, this function
510 510 performs various early repository loading functionality (such as
511 511 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
512 512 the repository can be opened, derives a type suitable for representing
513 513 that repository, and returns an instance of it.
514 514
515 515 The returned object conforms to the ``repository.completelocalrepository``
516 516 interface.
517 517
518 518 The repository type is derived by calling a series of factory functions
519 519 for each aspect/interface of the final repository. These are defined by
520 520 ``REPO_INTERFACES``.
521 521
522 522 Each factory function is called to produce a type implementing a specific
523 523 interface. The cumulative list of returned types will be combined into a
524 524 new type and that type will be instantiated to represent the local
525 525 repository.
526 526
527 527 The factory functions each receive various state that may be consulted
528 528 as part of deriving a type.
529 529
530 530 Extensions should wrap these factory functions to customize repository type
531 531 creation. Note that an extension's wrapped function may be called even if
532 532 that extension is not loaded for the repo being constructed. Extensions
533 533 should check if their ``__name__`` appears in the
534 534 ``extensionmodulenames`` set passed to the factory function and no-op if
535 535 not.
536 536 """
537 537 ui = baseui.copy()
538 538 # Prevent copying repo configuration.
539 539 ui.copy = baseui.copy
540 540
541 541 # Working directory VFS rooted at repository root.
542 542 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
543 543
544 544 # Main VFS for .hg/ directory.
545 545 hgpath = wdirvfs.join(b'.hg')
546 546 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
547 547 # Whether this repository is a shared one or not
548 548 shared = False
549 549 # If this repository is shared, the vfs pointing to the shared repo
550 550 sharedvfs = None
551 551
552 552 # The .hg/ path should exist and should be a directory. All other
553 553 # cases are errors.
554 554 if not hgvfs.isdir():
555 555 try:
556 556 hgvfs.stat()
557 557 except OSError as e:
558 558 if e.errno != errno.ENOENT:
559 559 raise
560 560 except ValueError as e:
561 561 # Can be raised on Python 3.8 when path is invalid.
562 562 raise error.Abort(
563 563 _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
564 564 )
565 565
566 566 raise error.RepoError(_(b'repository %s not found') % path)
567 567
568 568 requirements = _readrequires(hgvfs, True)
569 569
570 570 # The .hg/hgrc file may load extensions or contain config options
571 571 # that influence repository construction. Attempt to load it and
572 572 # process any new extensions that it may have pulled in.
573 573 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
574 574 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
575 575 extensions.loadall(ui)
576 576 extensions.populateui(ui)
577 577
578 578 # Set of module names of extensions loaded for this repository.
579 579 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
580 580
581 581 supportedrequirements = gathersupportedrequirements(ui)
582 582
583 583 # We first validate the requirements are known.
584 584 ensurerequirementsrecognized(requirements, supportedrequirements)
585 585
586 586 # Then we validate that the known set is reasonable to use together.
587 587 ensurerequirementscompatible(ui, requirements)
588 588
589 589 # TODO there are unhandled edge cases related to opening repositories with
590 590 # shared storage. If storage is shared, we should also test for requirements
591 591 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
592 592 # that repo, as that repo may load extensions needed to open it. This is a
593 593 # bit complicated because we don't want the other hgrc to overwrite settings
594 594 # in this hgrc.
595 595 #
596 596 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
597 597 # file when sharing repos. But if a requirement is added after the share is
598 598 # performed, thereby introducing a new requirement for the opener, we
599 599 # will not see that and could encounter a run-time error interacting with
600 600 # that shared store since it has an unknown-to-us requirement.
601 601
602 602 # At this point, we know we should be capable of opening the repository.
603 603 # Now get on with doing that.
604 604
605 605 features = set()
606 606
607 607 # The "store" part of the repository holds versioned data. How it is
608 608 # accessed is determined by various requirements. If `shared` or
609 609 # `relshared` requirements are present, this indicates the current repository
610 610 # is a share and the store exists in the path mentioned in `.hg/sharedpath`
611 611 shared = b'shared' in requirements or b'relshared' in requirements
612 612 if shared:
613 613 sharedvfs = _getsharedvfs(hgvfs, requirements)
614 614 storebasepath = sharedvfs.base
615 615 cachepath = sharedvfs.join(b'cache')
616 616 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
617 617 else:
618 618 storebasepath = hgvfs.base
619 619 cachepath = hgvfs.join(b'cache')
620 620 wcachepath = hgvfs.join(b'wcache')
621 621
622 622 # The store has changed over time and the exact layout is dictated by
623 623 # requirements. The store interface abstracts differences across all
624 624 # of them.
625 625 store = makestore(
626 626 requirements,
627 627 storebasepath,
628 628 lambda base: vfsmod.vfs(base, cacheaudited=True),
629 629 )
630 630 hgvfs.createmode = store.createmode
631 631
632 632 storevfs = store.vfs
633 633 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
634 634
635 635 # The cache vfs is used to manage cache files.
636 636 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
637 637 cachevfs.createmode = store.createmode
638 638 # The cache vfs is used to manage cache files related to the working copy
639 639 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
640 640 wcachevfs.createmode = store.createmode
641 641
642 642 # Now resolve the type for the repository object. We do this by repeatedly
643 643 # calling a factory function to produce types for specific aspects of the
644 644 # repo's operation. The aggregate returned types are used as base classes
645 645 # for a dynamically-derived type, which will represent our new repository.
646 646
647 647 bases = []
648 648 extrastate = {}
649 649
650 650 for iface, fn in REPO_INTERFACES:
651 651 # We pass all potentially useful state to give extensions tons of
652 652 # flexibility.
653 653 typ = fn()(
654 654 ui=ui,
655 655 intents=intents,
656 656 requirements=requirements,
657 657 features=features,
658 658 wdirvfs=wdirvfs,
659 659 hgvfs=hgvfs,
660 660 store=store,
661 661 storevfs=storevfs,
662 662 storeoptions=storevfs.options,
663 663 cachevfs=cachevfs,
664 664 wcachevfs=wcachevfs,
665 665 extensionmodulenames=extensionmodulenames,
666 666 extrastate=extrastate,
667 667 baseclasses=bases,
668 668 )
669 669
670 670 if not isinstance(typ, type):
671 671 raise error.ProgrammingError(
672 672 b'unable to construct type for %s' % iface
673 673 )
674 674
675 675 bases.append(typ)
676 676
677 677 # type() allows you to use characters in type names that wouldn't be
678 678 # recognized as Python symbols in source code. We abuse that to add
679 679 # rich information about our constructed repo.
680 680 name = pycompat.sysstr(
681 681 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
682 682 )
683 683
684 684 cls = type(name, tuple(bases), {})
685 685
686 686 return cls(
687 687 baseui=baseui,
688 688 ui=ui,
689 689 origroot=path,
690 690 wdirvfs=wdirvfs,
691 691 hgvfs=hgvfs,
692 692 requirements=requirements,
693 693 supportedrequirements=supportedrequirements,
694 694 sharedpath=storebasepath,
695 695 store=store,
696 696 cachevfs=cachevfs,
697 697 wcachevfs=wcachevfs,
698 698 features=features,
699 699 intents=intents,
700 700 )
701 701
702 702
703 703 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
704 704 """Load hgrc files/content into a ui instance.
705 705
706 706 This is called during repository opening to load any additional
707 707 config files or settings relevant to the current repository.
708 708
709 709 Returns a bool indicating whether any additional configs were loaded.
710 710
711 711 Extensions should monkeypatch this function to modify how per-repo
712 712 configs are loaded. For example, an extension may wish to pull in
713 713 configs from alternate files or sources.
714 714 """
715 715 if not rcutil.use_repo_hgrc():
716 716 return False
717 717 try:
718 718 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
719 719 return True
720 720 except IOError:
721 721 return False
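# Illustrative monkeypatching from an extension (hypothetical file name):
#
#     def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
#         loaded = orig(ui, wdirvfs, hgvfs, requirements)
#         ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
#         return True
#
#     extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)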
722 722
723 723
724 724 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
725 725 """Perform additional actions after .hg/hgrc is loaded.
726 726
727 727 This function is called during repository loading immediately after
728 728 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
729 729
730 730 The function can be used to validate configs, automatically add
731 731 options (including extensions) based on requirements, etc.
732 732 """
733 733
734 734 # Map of requirements to list of extensions to load automatically when
735 735 # requirement is present.
736 736 autoextensions = {
737 737 b'git': [b'git'],
738 738 b'largefiles': [b'largefiles'],
739 739 b'lfs': [b'lfs'],
740 740 }
741 741
742 742 for requirement, names in sorted(autoextensions.items()):
743 743 if requirement not in requirements:
744 744 continue
745 745
746 746 for name in names:
747 747 if not ui.hasconfig(b'extensions', name):
748 748 ui.setconfig(b'extensions', name, b'', source=b'autoload')
749 749
750 750
751 751 def gathersupportedrequirements(ui):
752 752 """Determine the complete set of recognized requirements."""
753 753 # Start with all requirements supported by this file.
754 754 supported = set(localrepository._basesupported)
755 755
756 756 # Execute ``featuresetupfuncs`` entries if they belong to an extension
757 757 # relevant to this ui instance.
758 758 modules = {m.__name__ for n, m in extensions.extensions(ui)}
759 759
760 760 for fn in featuresetupfuncs:
761 761 if fn.__module__ in modules:
762 762 fn(ui, supported)
763 763
764 764 # Add derived requirements from registered compression engines.
765 765 for name in util.compengines:
766 766 engine = util.compengines[name]
767 767 if engine.available() and engine.revlogheader():
768 768 supported.add(b'exp-compression-%s' % name)
769 769 if engine.name() == b'zstd':
770 770 supported.add(b'revlog-compression-zstd')
771 771
772 772 return supported
773 773
774 774
775 775 def ensurerequirementsrecognized(requirements, supported):
776 776 """Validate that a set of local requirements is recognized.
777 777
778 778 Receives a set of requirements. Raises an ``error.RepoError`` if there
779 779 exists any requirement in that set that currently loaded code doesn't
780 780 recognize.
781 781
782 782 Returns a set of supported requirements.
783 783 """
784 784 missing = set()
785 785
786 786 for requirement in requirements:
787 787 if requirement in supported:
788 788 continue
789 789
790 790 if not requirement or not requirement[0:1].isalnum():
791 791 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
792 792
793 793 missing.add(requirement)
794 794
795 795 if missing:
796 796 raise error.RequirementError(
797 797 _(b'repository requires features unknown to this Mercurial: %s')
798 798 % b' '.join(sorted(missing)),
799 799 hint=_(
800 800 b'see https://mercurial-scm.org/wiki/MissingRequirement '
801 801 b'for more information'
802 802 ),
803 803 )
804 804
805 805
806 806 def ensurerequirementscompatible(ui, requirements):
807 807 """Validates that a set of recognized requirements is mutually compatible.
808 808
809 809 Some requirements may not be compatible with others or require
810 810 config options that aren't enabled. This function is called during
811 811 repository opening to ensure that the set of requirements needed
812 812 to open a repository is sane and compatible with config options.
813 813
814 814 Extensions can monkeypatch this function to perform additional
815 815 checking.
816 816
817 817 ``error.RepoError`` should be raised on failure.
818 818 """
819 if b'exp-sparse' in requirements and not sparse.enabled:
819 if repository.SPARSE_REQUIREMENT in requirements and not sparse.enabled:
820 820 raise error.RepoError(
821 821 _(
822 822 b'repository is using sparse feature but '
823 823 b'sparse is not enabled; enable the '
824 824 b'"sparse" extensions to access'
825 825 )
826 826 )
827 827
828 828
829 829 def makestore(requirements, path, vfstype):
830 830 """Construct a storage object for a repository."""
831 831 if b'store' in requirements:
832 832 if b'fncache' in requirements:
833 833 return storemod.fncachestore(
834 834 path, vfstype, b'dotencode' in requirements
835 835 )
836 836
837 837 return storemod.encodedstore(path, vfstype)
838 838
839 839 return storemod.basicstore(path, vfstype)
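# For example: requirements {b'store', b'fncache', b'dotencode'} yield a
# fncachestore with dotencode enabled; {b'store'} alone yields an
# encodedstore; a legacy repo with neither falls back to basicstore.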
840 840
841 841
842 842 def resolvestorevfsoptions(ui, requirements, features):
843 843 """Resolve the options to pass to the store vfs opener.
844 844
845 845 The returned dict is used to influence behavior of the storage layer.
846 846 """
847 847 options = {}
848 848
849 849 if b'treemanifest' in requirements:
850 850 options[b'treemanifest'] = True
851 851
852 852 # experimental config: format.manifestcachesize
853 853 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
854 854 if manifestcachesize is not None:
855 855 options[b'manifestcachesize'] = manifestcachesize
856 856
857 857 # In the absence of another requirement superseding a revlog-related
858 858 # requirement, we have to assume the repo is using revlog version 0.
859 859 # This revlog format is super old and we don't bother trying to parse
860 860 # opener options for it because those options wouldn't do anything
861 861 # meaningful on such old repos.
862 862 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
863 863 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
864 864 else: # explicitly mark repo as using revlogv0
865 865 options[b'revlogv0'] = True
866 866
867 867 if COPIESSDC_REQUIREMENT in requirements:
868 868 options[b'copies-storage'] = b'changeset-sidedata'
869 869 else:
870 870 writecopiesto = ui.config(b'experimental', b'copies.write-to')
871 871 copiesextramode = (b'changeset-only', b'compatibility')
872 872 if writecopiesto in copiesextramode:
873 873 options[b'copies-storage'] = b'extra'
874 874
875 875 return options
876 876
877 877
878 878 def resolverevlogstorevfsoptions(ui, requirements, features):
879 879 """Resolve opener options specific to revlogs."""
880 880
881 881 options = {}
882 882 options[b'flagprocessors'] = {}
883 883
884 884 if b'revlogv1' in requirements:
885 885 options[b'revlogv1'] = True
886 886 if REVLOGV2_REQUIREMENT in requirements:
887 887 options[b'revlogv2'] = True
888 888
889 889 if b'generaldelta' in requirements:
890 890 options[b'generaldelta'] = True
891 891
892 892 # experimental config: format.chunkcachesize
893 893 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
894 894 if chunkcachesize is not None:
895 895 options[b'chunkcachesize'] = chunkcachesize
896 896
897 897 deltabothparents = ui.configbool(
898 898 b'storage', b'revlog.optimize-delta-parent-choice'
899 899 )
900 900 options[b'deltabothparents'] = deltabothparents
901 901
902 902 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
903 903 lazydeltabase = False
904 904 if lazydelta:
905 905 lazydeltabase = ui.configbool(
906 906 b'storage', b'revlog.reuse-external-delta-parent'
907 907 )
908 908 if lazydeltabase is None:
909 909 lazydeltabase = not scmutil.gddeltaconfig(ui)
910 910 options[b'lazydelta'] = lazydelta
911 911 options[b'lazydeltabase'] = lazydeltabase
912 912
913 913 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
914 914 if 0 <= chainspan:
915 915 options[b'maxdeltachainspan'] = chainspan
916 916
917 917 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
918 918 if mmapindexthreshold is not None:
919 919 options[b'mmapindexthreshold'] = mmapindexthreshold
920 920
921 921 withsparseread = ui.configbool(b'experimental', b'sparse-read')
922 922 srdensitythres = float(
923 923 ui.config(b'experimental', b'sparse-read.density-threshold')
924 924 )
925 925 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
926 926 options[b'with-sparse-read'] = withsparseread
927 927 options[b'sparse-read-density-threshold'] = srdensitythres
928 928 options[b'sparse-read-min-gap-size'] = srmingapsize
929 929
930 930 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
931 931 options[b'sparse-revlog'] = sparserevlog
932 932 if sparserevlog:
933 933 options[b'generaldelta'] = True
934 934
935 935 sidedata = SIDEDATA_REQUIREMENT in requirements
936 936 options[b'side-data'] = sidedata
937 937
938 938 maxchainlen = None
939 939 if sparserevlog:
940 940 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
941 941 # experimental config: format.maxchainlen
942 942 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
943 943 if maxchainlen is not None:
944 944 options[b'maxchainlen'] = maxchainlen
945 945
946 946 for r in requirements:
947 947 # we allow multiple compression engine requirements to co-exist because
948 948 # strictly speaking, revlog seems to support mixed compression styles.
949 949 #
950 950 # The compression used for new entries will be "the last one"
951 951 prefix = r.startswith
952 952 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
953 953 options[b'compengine'] = r.split(b'-', 2)[2]
954 954
955 955 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
956 956 if options[b'zlib.level'] is not None:
957 957 if not (0 <= options[b'zlib.level'] <= 9):
958 958 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
959 959 raise error.Abort(msg % options[b'zlib.level'])
960 960 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
961 961 if options[b'zstd.level'] is not None:
962 962 if not (0 <= options[b'zstd.level'] <= 22):
963 963 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
964 964 raise error.Abort(msg % options[b'zstd.level'])
965 965
966 966 if repository.NARROW_REQUIREMENT in requirements:
967 967 options[b'enableellipsis'] = True
968 968
969 969 if ui.configbool(b'experimental', b'rust.index'):
970 970 options[b'rust.index'] = True
971 971 if NODEMAP_REQUIREMENT in requirements:
972 972 options[b'persistent-nodemap'] = True
973 973 if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
974 974 options[b'persistent-nodemap.mmap'] = True
975 975 epnm = ui.config(b'storage', b'revlog.nodemap.mode')
976 976 options[b'persistent-nodemap.mode'] = epnm
977 977 if ui.configbool(b'devel', b'persistent-nodemap'):
978 978 options[b'devel-force-nodemap'] = True
979 979
980 980 return options
981 981
982 982
983 983 def makemain(**kwargs):
984 984 """Produce a type conforming to ``ilocalrepositorymain``."""
985 985 return localrepository
986 986
987 987
988 988 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
989 989 class revlogfilestorage(object):
990 990 """File storage when using revlogs."""
991 991
992 992 def file(self, path):
993 993 if path[0] == b'/':
994 994 path = path[1:]
995 995
996 996 return filelog.filelog(self.svfs, path)
997 997
998 998
999 999 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1000 1000 class revlognarrowfilestorage(object):
1001 1001 """File storage when using revlogs and narrow files."""
1002 1002
1003 1003 def file(self, path):
1004 1004 if path[0] == b'/':
1005 1005 path = path[1:]
1006 1006
1007 1007 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1008 1008
1009 1009
1010 1010 def makefilestorage(requirements, features, **kwargs):
1011 1011 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1012 1012 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1013 1013 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1014 1014
1015 1015 if repository.NARROW_REQUIREMENT in requirements:
1016 1016 return revlognarrowfilestorage
1017 1017 else:
1018 1018 return revlogfilestorage
1019 1019
1020 1020
1021 1021 # List of repository interfaces and factory functions for them. Each
1022 1022 # will be called in order during ``makelocalrepository()`` to iteratively
1023 1023 # derive the final type for a local repository instance. We capture the
1024 1024 # function as a lambda so we don't hold a reference and the module-level
1025 1025 # functions can be wrapped.
1026 1026 REPO_INTERFACES = [
1027 1027 (repository.ilocalrepositorymain, lambda: makemain),
1028 1028 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1029 1029 ]
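# Illustrative wrapping from an extension (hypothetical requirement and
# class names); this works because the lambdas above look the factory
# functions up at call time:
#
#     def wrapfilestorage(orig, requirements, features, **kwargs):
#         cls = orig(requirements, features, **kwargs)
#         if b'exp-myext' in requirements:
#             class myextfilestorage(cls):
#                 pass  # override file() here
#             return myextfilestorage
#         return cls
#
#     extensions.wrapfunction(localrepo, 'makefilestorage', wrapfilestorage)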
1030 1030
1031 1031
1032 1032 @interfaceutil.implementer(repository.ilocalrepositorymain)
1033 1033 class localrepository(object):
1034 1034 """Main class for representing local repositories.
1035 1035
1036 1036 All local repositories are instances of this class.
1037 1037
1038 1038 Constructed on its own, instances of this class are not usable as
1039 1039 repository objects. To obtain a usable repository object, call
1040 1040 ``hg.repository()``, ``localrepo.instance()``, or
1041 1041 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1042 1042 ``instance()`` adds support for creating new repositories.
1043 1043 ``hg.repository()`` adds more extension integration, including calling
1044 1044 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1045 1045 used.
1046 1046 """
1047 1047
1048 1048 # obsolete experimental requirements:
1049 1049 # - manifestv2: An experimental new manifest format that allowed
1050 1050 # for stem compression of long paths. Experiment ended up not
1051 1051 # being successful (repository sizes went up due to worse delta
1052 1052 # chains), and the code was deleted in 4.6.
1053 1053 supportedformats = {
1054 1054 b'revlogv1',
1055 1055 b'generaldelta',
1056 1056 b'treemanifest',
1057 1057 COPIESSDC_REQUIREMENT,
1058 1058 REVLOGV2_REQUIREMENT,
1059 1059 SIDEDATA_REQUIREMENT,
1060 1060 SPARSEREVLOG_REQUIREMENT,
1061 1061 NODEMAP_REQUIREMENT,
1062 1062 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1063 1063 }
1064 1064 _basesupported = supportedformats | {
1065 1065 b'store',
1066 1066 b'fncache',
1067 1067 b'shared',
1068 1068 b'relshared',
1069 1069 b'dotencode',
1070 b'exp-sparse',
1070 repository.SPARSE_REQUIREMENT,
1071 1071 b'internal-phase',
1072 1072 }
1073 1073
1074 1074 # list of prefixes for files which can be written without 'wlock'
1075 1075 # Extensions should extend this list when needed
1076 1076 _wlockfreeprefix = {
1077 1077 # We might consider requiring 'wlock' for the next
1078 1078 # two, but pretty much all the existing code assumes
1079 1079 # wlock is not needed so we keep them excluded for
1080 1080 # now.
1081 1081 b'hgrc',
1082 1082 b'requires',
1083 1083 # XXX cache is a complicated business; someone
1084 1084 # should investigate this in depth at some point
1085 1085 b'cache/',
1086 1086 # XXX shouldn't the dirstate be covered by the wlock?
1087 1087 b'dirstate',
1088 1088 # XXX bisect was still a bit too messy at the time
1089 1089 # this changeset was introduced. Someone should fix
1090 1090 # the remaining bit and drop this line
1091 1091 b'bisect.state',
1092 1092 }
1093 1093
1094 1094 def __init__(
1095 1095 self,
1096 1096 baseui,
1097 1097 ui,
1098 1098 origroot,
1099 1099 wdirvfs,
1100 1100 hgvfs,
1101 1101 requirements,
1102 1102 supportedrequirements,
1103 1103 sharedpath,
1104 1104 store,
1105 1105 cachevfs,
1106 1106 wcachevfs,
1107 1107 features,
1108 1108 intents=None,
1109 1109 ):
1110 1110 """Create a new local repository instance.
1111 1111
1112 1112 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1113 1113 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1114 1114 object.
1115 1115
1116 1116 Arguments:
1117 1117
1118 1118 baseui
1119 1119 ``ui.ui`` instance that ``ui`` argument was based off of.
1120 1120
1121 1121 ui
1122 1122 ``ui.ui`` instance for use by the repository.
1123 1123
1124 1124 origroot
1125 1125 ``bytes`` path to working directory root of this repository.
1126 1126
1127 1127 wdirvfs
1128 1128 ``vfs.vfs`` rooted at the working directory.
1129 1129
1130 1130 hgvfs
1131 1131 ``vfs.vfs`` rooted at .hg/
1132 1132
1133 1133 requirements
1134 1134 ``set`` of bytestrings representing repository opening requirements.
1135 1135
1136 1136 supportedrequirements
1137 1137 ``set`` of bytestrings representing repository requirements that we
1138 1138 know how to open. May be a superset of ``requirements``.
1139 1139
1140 1140 sharedpath
1141 1141 ``bytes`` Defining path to storage base directory. Points to a
1142 1142 ``.hg/`` directory somewhere.
1143 1143
1144 1144 store
1145 1145 ``store.basicstore`` (or derived) instance providing access to
1146 1146 versioned storage.
1147 1147
1148 1148 cachevfs
1149 1149 ``vfs.vfs`` used for cache files.
1150 1150
1151 1151 wcachevfs
1152 1152 ``vfs.vfs`` used for cache files related to the working copy.
1153 1153
1154 1154 features
1155 1155 ``set`` of bytestrings defining features/capabilities of this
1156 1156 instance.
1157 1157
1158 1158 intents
1159 1159 ``set`` of system strings indicating what this repo will be used
1160 1160 for.
1161 1161 """
1162 1162 self.baseui = baseui
1163 1163 self.ui = ui
1164 1164 self.origroot = origroot
1165 1165 # vfs rooted at working directory.
1166 1166 self.wvfs = wdirvfs
1167 1167 self.root = wdirvfs.base
1168 1168 # vfs rooted at .hg/. Used to access most non-store paths.
1169 1169 self.vfs = hgvfs
1170 1170 self.path = hgvfs.base
1171 1171 self.requirements = requirements
1172 1172 self.supported = supportedrequirements
1173 1173 self.sharedpath = sharedpath
1174 1174 self.store = store
1175 1175 self.cachevfs = cachevfs
1176 1176 self.wcachevfs = wcachevfs
1177 1177 self.features = features
1178 1178
1179 1179 self.filtername = None
1180 1180
1181 1181 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1182 1182 b'devel', b'check-locks'
1183 1183 ):
1184 1184 self.vfs.audit = self._getvfsward(self.vfs.audit)
1185 1185 # A list of callbacks to shape the phase if no data were found.
1186 1186 # Callbacks are in the form: func(repo, roots) --> processed root.
1187 1187 # This list is to be filled by extensions during repo setup
1188 1188 self._phasedefaults = []
1189 1189
1190 1190 color.setup(self.ui)
1191 1191
1192 1192 self.spath = self.store.path
1193 1193 self.svfs = self.store.vfs
1194 1194 self.sjoin = self.store.join
1195 1195 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1196 1196 b'devel', b'check-locks'
1197 1197 ):
1198 1198 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1199 1199 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1200 1200 else: # standard vfs
1201 1201 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1202 1202
1203 1203 self._dirstatevalidatewarned = False
1204 1204
1205 1205 self._branchcaches = branchmap.BranchMapCache()
1206 1206 self._revbranchcache = None
1207 1207 self._filterpats = {}
1208 1208 self._datafilters = {}
1209 1209 self._transref = self._lockref = self._wlockref = None
1210 1210
1211 1211 # A cache for various files under .hg/ that tracks file changes
1212 1212 # (used by the filecache decorator)
1213 1213 #
1214 1214 # Maps a property name to its util.filecacheentry
1215 1215 self._filecache = {}
1216 1216
1217 1217 # holds sets of revisions to be filtered
1218 1218 # should be cleared when something might have changed the filter value:
1219 1219 # - new changesets,
1220 1220 # - phase change,
1221 1221 # - new obsolescence marker,
1222 1222 # - working directory parent change,
1223 1223 # - bookmark changes
1224 1224 self.filteredrevcache = {}
1225 1225
1226 1226 # post-dirstate-status hooks
1227 1227 self._postdsstatus = []
1228 1228
1229 1229 # generic mapping between names and nodes
1230 1230 self.names = namespaces.namespaces()
1231 1231
1232 1232 # Key to signature value.
1233 1233 self._sparsesignaturecache = {}
1234 1234 # Signature to cached matcher instance.
1235 1235 self._sparsematchercache = {}
1236 1236
1237 1237 self._extrafilterid = repoview.extrafilter(ui)
1238 1238
1239 1239 self.filecopiesmode = None
1240 1240 if COPIESSDC_REQUIREMENT in self.requirements:
1241 1241 self.filecopiesmode = b'changeset-sidedata'
1242 1242
1243 1243 def _getvfsward(self, origfunc):
1244 1244 """build a ward for self.vfs"""
1245 1245 rref = weakref.ref(self)
1246 1246
1247 1247 def checkvfs(path, mode=None):
1248 1248 ret = origfunc(path, mode=mode)
1249 1249 repo = rref()
1250 1250 if (
1251 1251 repo is None
1252 1252 or not util.safehasattr(repo, b'_wlockref')
1253 1253 or not util.safehasattr(repo, b'_lockref')
1254 1254 ):
1255 1255 return
1256 1256 if mode in (None, b'r', b'rb'):
1257 1257 return
1258 1258 if path.startswith(repo.path):
1259 1259 # truncate name relative to the repository (.hg)
1260 1260 path = path[len(repo.path) + 1 :]
1261 1261 if path.startswith(b'cache/'):
1262 1262 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1263 1263 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1264 1264 # path prefixes covered by 'lock'
1265 1265 vfs_path_prefixes = (b'journal.', b'undo.', b'strip-backup/')
1266 1266 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1267 1267 if repo._currentlock(repo._lockref) is None:
1268 1268 repo.ui.develwarn(
1269 1269 b'write with no lock: "%s"' % path,
1270 1270 stacklevel=3,
1271 1271 config=b'check-locks',
1272 1272 )
1273 1273 elif repo._currentlock(repo._wlockref) is None:
1274 1274 # rest of vfs files are covered by 'wlock'
1275 1275 #
1276 1276 # exclude special files
1277 1277 for prefix in self._wlockfreeprefix:
1278 1278 if path.startswith(prefix):
1279 1279 return
1280 1280 repo.ui.develwarn(
1281 1281 b'write with no wlock: "%s"' % path,
1282 1282 stacklevel=3,
1283 1283 config=b'check-locks',
1284 1284 )
1285 1285 return ret
1286 1286
1287 1287 return checkvfs
1288 1288
1289 1289 def _getsvfsward(self, origfunc):
1290 1290 """build a ward for self.svfs"""
1291 1291 rref = weakref.ref(self)
1292 1292
1293 1293 def checksvfs(path, mode=None):
1294 1294 ret = origfunc(path, mode=mode)
1295 1295 repo = rref()
1296 1296 if repo is None or not util.safehasattr(repo, b'_lockref'):
1297 1297 return
1298 1298 if mode in (None, b'r', b'rb'):
1299 1299 return
1300 1300 if path.startswith(repo.sharedpath):
1301 1301 # truncate name relative to the repository (.hg)
1302 1302 path = path[len(repo.sharedpath) + 1 :]
1303 1303 if repo._currentlock(repo._lockref) is None:
1304 1304 repo.ui.develwarn(
1305 1305 b'write with no lock: "%s"' % path, stacklevel=4
1306 1306 )
1307 1307 return ret
1308 1308
1309 1309 return checksvfs
1310 1310
1311 1311 def close(self):
1312 1312 self._writecaches()
1313 1313
1314 1314 def _writecaches(self):
1315 1315 if self._revbranchcache:
1316 1316 self._revbranchcache.write()
1317 1317
1318 1318 def _restrictcapabilities(self, caps):
1319 1319 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1320 1320 caps = set(caps)
1321 1321 capsblob = bundle2.encodecaps(
1322 1322 bundle2.getrepocaps(self, role=b'client')
1323 1323 )
1324 1324 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1325 1325 return caps
1326 1326
1327 1327 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1328 1328 # self -> auditor -> self._checknested -> self
1329 1329
1330 1330 @property
1331 1331 def auditor(self):
1332 1332 # This is only used by context.workingctx.match in order to
1333 1333 # detect files in subrepos.
1334 1334 return pathutil.pathauditor(self.root, callback=self._checknested)
1335 1335
1336 1336 @property
1337 1337 def nofsauditor(self):
1338 1338 # This is only used by context.basectx.match in order to detect
1339 1339 # files in subrepos.
1340 1340 return pathutil.pathauditor(
1341 1341 self.root, callback=self._checknested, realfs=False, cached=True
1342 1342 )
1343 1343
1344 1344 def _checknested(self, path):
1345 1345 """Determine if path is a legal nested repository."""
1346 1346 if not path.startswith(self.root):
1347 1347 return False
1348 1348 subpath = path[len(self.root) + 1 :]
1349 1349 normsubpath = util.pconvert(subpath)
1350 1350
1351 1351 # XXX: Checking against the current working copy is wrong in
1352 1352 # the sense that it can reject things like
1353 1353 #
1354 1354 # $ hg cat -r 10 sub/x.txt
1355 1355 #
1356 1356 # if sub/ is no longer a subrepository in the working copy
1357 1357 # parent revision.
1358 1358 #
1359 1359 # However, it can of course also allow things that would have
1360 1360 # been rejected before, such as the above cat command if sub/
1361 1361 # is a subrepository now, but was a normal directory before.
1362 1362 # The old path auditor would have rejected by mistake since it
1363 1363 # panics when it sees sub/.hg/.
1364 1364 #
1365 1365 # All in all, checking against the working copy seems sensible
1366 1366 # since we want to prevent access to nested repositories on
1367 1367 # the filesystem *now*.
1368 1368 ctx = self[None]
1369 1369 parts = util.splitpath(subpath)
1370 1370 while parts:
1371 1371 prefix = b'/'.join(parts)
1372 1372 if prefix in ctx.substate:
1373 1373 if prefix == normsubpath:
1374 1374 return True
1375 1375 else:
1376 1376 sub = ctx.sub(prefix)
1377 1377 return sub.checknested(subpath[len(prefix) + 1 :])
1378 1378 else:
1379 1379 parts.pop()
1380 1380 return False
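# For illustration (a sketch, not part of the module): with a substate
# entry b'sub' in the working context, _checknested(root + b'/sub')
# returns True, while _checknested(root + b'/sub/inner') is delegated
# to the subrepo's own checknested(). A path such as root + b'/other'
# pops prefixes until none remain and returns False.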
1381 1381
1382 1382 def peer(self):
1383 1383 return localpeer(self) # not cached to avoid reference cycle
1384 1384
1385 1385 def unfiltered(self):
1386 1386 """Return unfiltered version of the repository
1387 1387
1388 1388 Intended to be overwritten by filtered repo."""
1389 1389 return self
1390 1390
1391 1391 def filtered(self, name, visibilityexceptions=None):
1392 1392 """Return a filtered version of a repository
1393 1393
1394 1394 The `name` parameter is the identifier of the requested view. This
1395 1395 will return a repoview object set "exactly" to the specified view.
1396 1396
1397 1397 This function does not apply recursive filtering to a repository. For
1398 1398 example calling `repo.filtered("served")` will return a repoview using
1399 1399 the "served" view, regardless of the initial view used by `repo`.
1400 1400
1401 1401 In other words, there is always only one level of `repoview` "filtering".
1402 1402 """
1403 1403 if self._extrafilterid is not None and b'%' not in name:
1404 1404 name = name + b'%' + self._extrafilterid
1405 1405
1406 1406 cls = repoview.newtype(self.unfiltered().__class__)
1407 1407 return cls(self, name, visibilityexceptions)
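# Illustrative usage (a sketch): repo.filtered(b'visible') returns a
# repoview hiding hidden (e.g. obsolete) changesets, and
# repo.filtered(b'served') additionally hides secret ones; both wrap
# the same storage, and calling filtered() on a view does not stack
# filters, as described in the docstring above.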
1408 1408
1409 1409 @mixedrepostorecache(
1410 1410 (b'bookmarks', b'plain'),
1411 1411 (b'bookmarks.current', b'plain'),
1412 1412 (b'bookmarks', b''),
1413 1413 (b'00changelog.i', b''),
1414 1414 )
1415 1415 def _bookmarks(self):
1416 1416 # Since the multiple files involved in the transaction cannot be
1417 1417 # written atomically (with the current repository format), there is a race
1418 1418 # condition here.
1419 1419 #
1420 1420 # 1) changelog content A is read
1421 1421 # 2) outside transaction update changelog to content B
1422 1422 # 3) outside transaction update bookmark file referring to content B
1423 1423 # 4) bookmarks file content is read and filtered against changelog-A
1424 1424 #
1425 1425 # When this happens, bookmarks against nodes missing from A are dropped.
1426 1426 #
1427 1427 # Having this happen during read is not great, but it becomes worse
1428 1428 # when it happens during write, because the bookmarks to the "unknown"
1429 1429 # nodes will be dropped for good. However, writes happen within locks.
1430 1430 # This locking makes it possible to have a race-free consistent read.
1431 1431 # For this purpose, data read from disk before locking is
1432 1432 # "invalidated" right after the locks are taken. These invalidations are
1433 1433 # "light": the `filecache` mechanism keeps the data in memory and will
1434 1434 # reuse it if the underlying files did not change. Not parsing the
1435 1435 # same data multiple times helps performance.
1436 1436 #
1437 1437 # Unfortunately, in the case described above, the files tracked by the
1438 1438 # bookmarks file cache might not have changed, but the in-memory
1439 1439 # content is still "wrong" because we used an older changelog content
1440 1440 # to process the on-disk data. So after locking, the changelog would be
1441 1441 # refreshed but `_bookmarks` would be preserved.
1442 1442 # Adding `00changelog.i` to the list of tracked files is not
1443 1443 # enough, because at the time we build the content for `_bookmarks` in
1444 1444 # (4), the changelog file has already diverged from the content used
1445 1445 # for loading `changelog` in (1).
1446 1446 #
1447 1447 # To prevent the issue, we force the changelog to be explicitly
1448 1448 # reloaded while computing `_bookmarks`. The data race can still happen
1449 1449 # without the lock (with a narrower window), but it would no longer go
1450 1450 # undetected during the lock-time refresh.
1451 1451 #
1452 1452 # The new schedule is as follows:
1453 1453 #
1454 1454 # 1) filecache logic detect that `_bookmarks` needs to be computed
1455 1455 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1456 1456 # 3) We force `changelog` filecache to be tested
1457 1457 # 4) cachestat for `changelog` are captured (for changelog)
1458 1458 # 5) `_bookmarks` is computed and cached
1459 1459 #
1460 1460 # The step in (3) ensures we have a changelog at least as recent as the
1461 1461 # cache stat computed in (1). As a result, at locking time:
1462 1462 # * if the changelog did not change since (1) -> we can reuse the data
1463 1463 # * otherwise -> the bookmarks get refreshed.
1464 1464 self._refreshchangelog()
1465 1465 return bookmarks.bmstore(self)
1466 1466
1467 1467 def _refreshchangelog(self):
1468 1468 """make sure the in memory changelog match the on-disk one"""
1469 1469 if 'changelog' in vars(self) and self.currenttransaction() is None:
1470 1470 del self.changelog
1471 1471
1472 1472 @property
1473 1473 def _activebookmark(self):
1474 1474 return self._bookmarks.active
1475 1475
1476 1476 # _phasesets depend on changelog. what we need is to call
1477 1477 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1478 1478 # can't be easily expressed in filecache mechanism.
1479 1479 @storecache(b'phaseroots', b'00changelog.i')
1480 1480 def _phasecache(self):
1481 1481 return phases.phasecache(self, self._phasedefaults)
1482 1482
1483 1483 @storecache(b'obsstore')
1484 1484 def obsstore(self):
1485 1485 return obsolete.makestore(self.ui, self)
1486 1486
1487 1487 @storecache(b'00changelog.i')
1488 1488 def changelog(self):
1489 1489 # load dirstate before changelog to avoid a race; see issue6303
1490 1490 self.dirstate.prefetch_parents()
1491 1491 return self.store.changelog(txnutil.mayhavepending(self.root))
1492 1492
1493 1493 @storecache(b'00manifest.i')
1494 1494 def manifestlog(self):
1495 1495 return self.store.manifestlog(self, self._storenarrowmatch)
1496 1496
1497 1497 @repofilecache(b'dirstate')
1498 1498 def dirstate(self):
1499 1499 return self._makedirstate()
1500 1500
1501 1501 def _makedirstate(self):
1502 1502 """Extension point for wrapping the dirstate per-repo."""
1503 1503 sparsematchfn = lambda: sparse.matcher(self)
1504 1504
1505 1505 return dirstate.dirstate(
1506 1506 self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
1507 1507 )
1508 1508
1509 1509 def _dirstatevalidate(self, node):
1510 1510 try:
1511 1511 self.changelog.rev(node)
1512 1512 return node
1513 1513 except error.LookupError:
1514 1514 if not self._dirstatevalidatewarned:
1515 1515 self._dirstatevalidatewarned = True
1516 1516 self.ui.warn(
1517 1517 _(b"warning: ignoring unknown working parent %s!\n")
1518 1518 % short(node)
1519 1519 )
1520 1520 return nullid
1521 1521
1522 1522 @storecache(narrowspec.FILENAME)
1523 1523 def narrowpats(self):
1524 1524 """matcher patterns for this repository's narrowspec
1525 1525
1526 1526 A tuple of (includes, excludes).
1527 1527 """
1528 1528 return narrowspec.load(self)
1529 1529
1530 1530 @storecache(narrowspec.FILENAME)
1531 1531 def _storenarrowmatch(self):
1532 1532 if repository.NARROW_REQUIREMENT not in self.requirements:
1533 1533 return matchmod.always()
1534 1534 include, exclude = self.narrowpats
1535 1535 return narrowspec.match(self.root, include=include, exclude=exclude)
1536 1536
1537 1537 @storecache(narrowspec.FILENAME)
1538 1538 def _narrowmatch(self):
1539 1539 if repository.NARROW_REQUIREMENT not in self.requirements:
1540 1540 return matchmod.always()
1541 1541 narrowspec.checkworkingcopynarrowspec(self)
1542 1542 include, exclude = self.narrowpats
1543 1543 return narrowspec.match(self.root, include=include, exclude=exclude)
1544 1544
1545 1545 def narrowmatch(self, match=None, includeexact=False):
1546 1546 """matcher corresponding the the repo's narrowspec
1547 1547
1548 1548 If `match` is given, then that will be intersected with the narrow
1549 1549 matcher.
1550 1550
1551 1551 If `includeexact` is True, then any exact matches from `match` will
1552 1552 be included even if they're outside the narrowspec.
1553 1553 """
1554 1554 if match:
1555 1555 if includeexact and not self._narrowmatch.always():
1556 1556 # do not exclude explicitly-specified paths so that they can
1557 1557 # be warned later on
1558 1558 em = matchmod.exact(match.files())
1559 1559 nm = matchmod.unionmatcher([self._narrowmatch, em])
1560 1560 return matchmod.intersectmatchers(match, nm)
1561 1561 return matchmod.intersectmatchers(match, self._narrowmatch)
1562 1562 return self._narrowmatch
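# Illustrative behavior (a sketch; the narrowspec content is
# hypothetical): with an include of b'path:src', narrowmatch() rejects
# b'docs/readme.txt'. Passing includeexact=True together with a match
# naming that file exactly keeps it in the intersection so callers can
# warn about it later instead of silently dropping it.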
1563 1563
1564 1564 def setnarrowpats(self, newincludes, newexcludes):
1565 1565 narrowspec.save(self, newincludes, newexcludes)
1566 1566 self.invalidate(clearfilecache=True)
1567 1567
1568 1568 @unfilteredpropertycache
1569 1569 def _quick_access_changeid_null(self):
1570 1570 return {
1571 1571 b'null': (nullrev, nullid),
1572 1572 nullrev: (nullrev, nullid),
1573 1573 nullid: (nullrev, nullid),
1574 1574 }
1575 1575
1576 1576 @unfilteredpropertycache
1577 1577 def _quick_access_changeid_wc(self):
1578 1578 # also fast path access to the working copy parents
1579 1579 # however, only do it for filters that ensure the wc is visible.
1580 1580 quick = {}
1581 1581 cl = self.unfiltered().changelog
1582 1582 for node in self.dirstate.parents():
1583 1583 if node == nullid:
1584 1584 continue
1585 1585 rev = cl.index.get_rev(node)
1586 1586 if rev is None:
1587 1587 # unknown working copy parent case:
1588 1588 #
1589 1589 # skip the fast path and let higher code deal with it
1590 1590 continue
1591 1591 pair = (rev, node)
1592 1592 quick[rev] = pair
1593 1593 quick[node] = pair
1594 1594 # also add the parents of the parents
1595 1595 for r in cl.parentrevs(rev):
1596 1596 if r == nullrev:
1597 1597 continue
1598 1598 n = cl.node(r)
1599 1599 pair = (r, n)
1600 1600 quick[r] = pair
1601 1601 quick[n] = pair
1602 1602 p1node = self.dirstate.p1()
1603 1603 if p1node != nullid:
1604 1604 quick[b'.'] = quick[p1node]
1605 1605 return quick
1606 1606
1607 1607 @unfilteredmethod
1608 1608 def _quick_access_changeid_invalidate(self):
1609 1609 if '_quick_access_changeid_wc' in vars(self):
1610 1610 del self.__dict__['_quick_access_changeid_wc']
1611 1611
1612 1612 @property
1613 1613 def _quick_access_changeid(self):
1614 1614 """an helper dictionnary for __getitem__ calls
1615 1615
1616 1616 This contains a list of symbols we can recognise right away without
1617 1617 further processing.
1618 1618 """
1619 1619 mapping = self._quick_access_changeid_null
1620 1620 if self.filtername in repoview.filter_has_wc:
1621 1621 mapping = mapping.copy()
1622 1622 mapping.update(self._quick_access_changeid_wc)
1623 1623 return mapping
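# For example (a sketch): on a view whose filter guarantees working
# copy visibility, with working copy parent P at revision r, the
# mapping holds entries for b'null', nullrev and nullid, plus r, P and
# b'.', each resolving to a (rev, node) pair so that __getitem__ below
# can skip full symbol resolution.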
1624 1624
1625 1625 def __getitem__(self, changeid):
1626 1626 # dealing with special cases
1627 1627 if changeid is None:
1628 1628 return context.workingctx(self)
1629 1629 if isinstance(changeid, context.basectx):
1630 1630 return changeid
1631 1631
1632 1632 # dealing with multiple revisions
1633 1633 if isinstance(changeid, slice):
1634 1634 # wdirrev isn't contiguous so the slice shouldn't include it
1635 1635 return [
1636 1636 self[i]
1637 1637 for i in pycompat.xrange(*changeid.indices(len(self)))
1638 1638 if i not in self.changelog.filteredrevs
1639 1639 ]
1640 1640
1641 1641 # dealing with some special values
1642 1642 quick_access = self._quick_access_changeid.get(changeid)
1643 1643 if quick_access is not None:
1644 1644 rev, node = quick_access
1645 1645 return context.changectx(self, rev, node, maybe_filtered=False)
1646 1646 if changeid == b'tip':
1647 1647 node = self.changelog.tip()
1648 1648 rev = self.changelog.rev(node)
1649 1649 return context.changectx(self, rev, node)
1650 1650
1651 1651 # dealing with arbitrary values
1652 1652 try:
1653 1653 if isinstance(changeid, int):
1654 1654 node = self.changelog.node(changeid)
1655 1655 rev = changeid
1656 1656 elif changeid == b'.':
1657 1657 # this is a hack to delay/avoid loading obsmarkers
1658 1658 # when we know that '.' won't be hidden
1659 1659 node = self.dirstate.p1()
1660 1660 rev = self.unfiltered().changelog.rev(node)
1661 1661 elif len(changeid) == 20:
1662 1662 try:
1663 1663 node = changeid
1664 1664 rev = self.changelog.rev(changeid)
1665 1665 except error.FilteredLookupError:
1666 1666 changeid = hex(changeid) # for the error message
1667 1667 raise
1668 1668 except LookupError:
1669 1669 # check if it might have come from a damaged dirstate
1670 1670 #
1671 1671 # XXX we could avoid the unfiltered if we had a recognizable
1672 1672 # exception for filtered changeset access
1673 1673 if (
1674 1674 self.local()
1675 1675 and changeid in self.unfiltered().dirstate.parents()
1676 1676 ):
1677 1677 msg = _(b"working directory has unknown parent '%s'!")
1678 1678 raise error.Abort(msg % short(changeid))
1679 1679 changeid = hex(changeid) # for the error message
1680 1680 raise
1681 1681
1682 1682 elif len(changeid) == 40:
1683 1683 node = bin(changeid)
1684 1684 rev = self.changelog.rev(node)
1685 1685 else:
1686 1686 raise error.ProgrammingError(
1687 1687 b"unsupported changeid '%s' of type %s"
1688 1688 % (changeid, pycompat.bytestr(type(changeid)))
1689 1689 )
1690 1690
1691 1691 return context.changectx(self, rev, node)
1692 1692
1693 1693 except (error.FilteredIndexError, error.FilteredLookupError):
1694 1694 raise error.FilteredRepoLookupError(
1695 1695 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1696 1696 )
1697 1697 except (IndexError, LookupError):
1698 1698 raise error.RepoLookupError(
1699 1699 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1700 1700 )
1701 1701 except error.WdirUnsupported:
1702 1702 return context.workingctx(self)
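# Illustrative lookups handled above (a sketch): repo[None] yields the
# working context, repo[0] an integer revision, a 20-byte changeid is
# treated as a binary node and a 40-byte one as a hex node; other
# user-supplied symbols are expected to be resolved at a higher level
# (e.g. via scmutil.revsymbol()) before reaching __getitem__.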
1703 1703
1704 1704 def __contains__(self, changeid):
1705 1705 """True if the given changeid exists
1706 1706
1707 1707 error.AmbiguousPrefixLookupError is raised if an ambiguous node
1708 1708 is specified.
1709 1709 """
1710 1710 try:
1711 1711 self[changeid]
1712 1712 return True
1713 1713 except error.RepoLookupError:
1714 1714 return False
1715 1715
1716 1716 def __nonzero__(self):
1717 1717 return True
1718 1718
1719 1719 __bool__ = __nonzero__
1720 1720
1721 1721 def __len__(self):
1722 1722 # no need to pay the cost of repoview.changelog
1723 1723 unfi = self.unfiltered()
1724 1724 return len(unfi.changelog)
1725 1725
1726 1726 def __iter__(self):
1727 1727 return iter(self.changelog)
1728 1728
1729 1729 def revs(self, expr, *args):
1730 1730 '''Find revisions matching a revset.
1731 1731
1732 1732 The revset is specified as a string ``expr`` that may contain
1733 1733 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1734 1734
1735 1735 Revset aliases from the configuration are not expanded. To expand
1736 1736 user aliases, consider calling ``scmutil.revrange()`` or
1737 1737 ``repo.anyrevs([expr], user=True)``.
1738 1738
1739 1739 Returns a smartset.abstractsmartset, which is a list-like interface
1740 1740 that contains integer revisions.
1741 1741 '''
1742 1742 tree = revsetlang.spectree(expr, *args)
1743 1743 return revset.makematcher(tree)(self)
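# Illustrative usage (a sketch): the %-formatting delegates to
# revsetlang.formatspec, so values can be embedded safely, e.g.
#
#   for rev in repo.revs(b'ancestors(%n) and not public()', node):
#       ...
#
# where %n escapes a binary node (%d an integer revision, %s a string,
# %ld a list of integers).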
1744 1744
1745 1745 def set(self, expr, *args):
1746 1746 '''Find revisions matching a revset and emit changectx instances.
1747 1747
1748 1748 This is a convenience wrapper around ``revs()`` that iterates the
1749 1749 result and is a generator of changectx instances.
1750 1750
1751 1751 Revset aliases from the configuration are not expanded. To expand
1752 1752 user aliases, consider calling ``scmutil.revrange()``.
1753 1753 '''
1754 1754 for r in self.revs(expr, *args):
1755 1755 yield self[r]
1756 1756
1757 1757 def anyrevs(self, specs, user=False, localalias=None):
1758 1758 '''Find revisions matching one of the given revsets.
1759 1759
1760 1760 Revset aliases from the configuration are not expanded by default. To
1761 1761 expand user aliases, specify ``user=True``. To provide some local
1762 1762 definitions overriding user aliases, set ``localalias`` to
1763 1763 ``{name: definitionstring}``.
1764 1764 '''
1765 1765 if specs == [b'null']:
1766 1766 return revset.baseset([nullrev])
1767 1767 if specs == [b'.']:
1768 1768 quick_data = self._quick_access_changeid.get(b'.')
1769 1769 if quick_data is not None:
1770 1770 return revset.baseset([quick_data[0]])
1771 1771 if user:
1772 1772 m = revset.matchany(
1773 1773 self.ui,
1774 1774 specs,
1775 1775 lookup=revset.lookupfn(self),
1776 1776 localalias=localalias,
1777 1777 )
1778 1778 else:
1779 1779 m = revset.matchany(None, specs, localalias=localalias)
1780 1780 return m(self)
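# Illustrative usage (a sketch; the b'tagged' alias is hypothetical):
#
#   repo.anyrevs([b'tagged()'], user=True,
#                localalias={b'tagged': b'tag()'})
#
# resolves b'tagged' through the local definition, which overrides any
# user-configured revset alias of the same name.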
1781 1781
1782 1782 def url(self):
1783 1783 return b'file:' + self.root
1784 1784
1785 1785 def hook(self, name, throw=False, **args):
1786 1786 """Call a hook, passing this repo instance.
1787 1787
1788 1788 This a convenience method to aid invoking hooks. Extensions likely
1789 1789 won't call this unless they have registered a custom hook or are
1790 1790 replacing code that is expected to call a hook.
1791 1791 """
1792 1792 return hook.hook(self.ui, self, name, throw, **args)
1793 1793
1794 1794 @filteredpropertycache
1795 1795 def _tagscache(self):
1796 1796 '''Returns a tagscache object that contains various tags-related
1797 1797 caches.'''
1798 1798
1799 1799 # This simplifies its cache management by having one decorated
1800 1800 # function (this one) and the rest simply fetch things from it.
1801 1801 class tagscache(object):
1802 1802 def __init__(self):
1803 1803 # These two define the set of tags for this repository. tags
1804 1804 # maps tag name to node; tagtypes maps tag name to 'global' or
1805 1805 # 'local'. (Global tags are defined by .hgtags across all
1806 1806 # heads, and local tags are defined in .hg/localtags.)
1807 1807 # They constitute the in-memory cache of tags.
1808 1808 self.tags = self.tagtypes = None
1809 1809
1810 1810 self.nodetagscache = self.tagslist = None
1811 1811
1812 1812 cache = tagscache()
1813 1813 cache.tags, cache.tagtypes = self._findtags()
1814 1814
1815 1815 return cache
1816 1816
1817 1817 def tags(self):
1818 1818 '''return a mapping of tag to node'''
1819 1819 t = {}
1820 1820 if self.changelog.filteredrevs:
1821 1821 tags, tt = self._findtags()
1822 1822 else:
1823 1823 tags = self._tagscache.tags
1824 1824 rev = self.changelog.rev
1825 1825 for k, v in pycompat.iteritems(tags):
1826 1826 try:
1827 1827 # ignore tags to unknown nodes
1828 1828 rev(v)
1829 1829 t[k] = v
1830 1830 except (error.LookupError, ValueError):
1831 1831 pass
1832 1832 return t
1833 1833
1834 1834 def _findtags(self):
1835 1835 '''Do the hard work of finding tags. Return a pair of dicts
1836 1836 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1837 1837 maps tag name to a string like \'global\' or \'local\'.
1838 1838 Subclasses or extensions are free to add their own tags, but
1839 1839 should be aware that the returned dicts will be retained for the
1840 1840 duration of the localrepo object.'''
1841 1841
1842 1842 # XXX what tagtype should subclasses/extensions use? Currently
1843 1843 # mq and bookmarks add tags, but do not set the tagtype at all.
1844 1844 # Should each extension invent its own tag type? Should there
1845 1845 # be one tagtype for all such "virtual" tags? Or is the status
1846 1846 # quo fine?
1847 1847
1848 1848 # map tag name to (node, hist)
1849 1849 alltags = tagsmod.findglobaltags(self.ui, self)
1850 1850 # map tag name to tag type
1851 1851 tagtypes = {tag: b'global' for tag in alltags}
1852 1852
1853 1853 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1854 1854
1855 1855 # Build the return dicts. Have to re-encode tag names because
1856 1856 # the tags module always uses UTF-8 (in order not to lose info
1857 1857 # writing to the cache), but the rest of Mercurial wants them in
1858 1858 # local encoding.
1859 1859 tags = {}
1860 1860 for (name, (node, hist)) in pycompat.iteritems(alltags):
1861 1861 if node != nullid:
1862 1862 tags[encoding.tolocal(name)] = node
1863 1863 tags[b'tip'] = self.changelog.tip()
1864 1864 tagtypes = {
1865 1865 encoding.tolocal(name): value
1866 1866 for (name, value) in pycompat.iteritems(tagtypes)
1867 1867 }
1868 1868 return (tags, tagtypes)
1869 1869
1870 1870 def tagtype(self, tagname):
1871 1871 '''
1872 1872 return the type of the given tag. result can be:
1873 1873
1874 1874 'local' : a local tag
1875 1875 'global' : a global tag
1876 1876 None : tag does not exist
1877 1877 '''
1878 1878
1879 1879 return self._tagscache.tagtypes.get(tagname)
1880 1880
1881 1881 def tagslist(self):
1882 1882 '''return a list of tags ordered by revision'''
1883 1883 if not self._tagscache.tagslist:
1884 1884 l = []
1885 1885 for t, n in pycompat.iteritems(self.tags()):
1886 1886 l.append((self.changelog.rev(n), t, n))
1887 1887 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1888 1888
1889 1889 return self._tagscache.tagslist
1890 1890
1891 1891 def nodetags(self, node):
1892 1892 '''return the tags associated with a node'''
1893 1893 if not self._tagscache.nodetagscache:
1894 1894 nodetagscache = {}
1895 1895 for t, n in pycompat.iteritems(self._tagscache.tags):
1896 1896 nodetagscache.setdefault(n, []).append(t)
1897 1897 for tags in pycompat.itervalues(nodetagscache):
1898 1898 tags.sort()
1899 1899 self._tagscache.nodetagscache = nodetagscache
1900 1900 return self._tagscache.nodetagscache.get(node, [])
1901 1901
1902 1902 def nodebookmarks(self, node):
1903 1903 """return the list of bookmarks pointing to the specified node"""
1904 1904 return self._bookmarks.names(node)
1905 1905
1906 1906 def branchmap(self):
1907 1907 '''returns a dictionary {branch: [branchheads]} with branchheads
1908 1908 ordered by increasing revision number'''
1909 1909 return self._branchcaches[self]
1910 1910
1911 1911 @unfilteredmethod
1912 1912 def revbranchcache(self):
1913 1913 if not self._revbranchcache:
1914 1914 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1915 1915 return self._revbranchcache
1916 1916
1917 1917 def branchtip(self, branch, ignoremissing=False):
1918 1918 '''return the tip node for a given branch
1919 1919
1920 1920 If ignoremissing is True, then this method will not raise an error.
1921 1921 This is helpful for callers that only expect None for a missing branch
1922 1922 (e.g. namespace).
1923 1923
1924 1924 '''
1925 1925 try:
1926 1926 return self.branchmap().branchtip(branch)
1927 1927 except KeyError:
1928 1928 if not ignoremissing:
1929 1929 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
1930 1930 else:
1931 1931 pass
1932 1932
1933 1933 def lookup(self, key):
1934 1934 node = scmutil.revsymbol(self, key).node()
1935 1935 if node is None:
1936 1936 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
1937 1937 return node
1938 1938
1939 1939 def lookupbranch(self, key):
1940 1940 if self.branchmap().hasbranch(key):
1941 1941 return key
1942 1942
1943 1943 return scmutil.revsymbol(self, key).branch()
1944 1944
1945 1945 def known(self, nodes):
1946 1946 cl = self.changelog
1947 1947 get_rev = cl.index.get_rev
1948 1948 filtered = cl.filteredrevs
1949 1949 result = []
1950 1950 for n in nodes:
1951 1951 r = get_rev(n)
1952 1952 resp = not (r is None or r in filtered)
1953 1953 result.append(resp)
1954 1954 return result
1955 1955
1956 1956 def local(self):
1957 1957 return self
1958 1958
1959 1959 def publishing(self):
1960 1960 # it's safe (and desirable) to trust the publish flag unconditionally
1961 1961 # so that we don't finalize changes shared between users via ssh or nfs
1962 1962 return self.ui.configbool(b'phases', b'publish', untrusted=True)
1963 1963
1964 1964 def cancopy(self):
1965 1965 # so statichttprepo's override of local() works
1966 1966 if not self.local():
1967 1967 return False
1968 1968 if not self.publishing():
1969 1969 return True
1970 1970 # if publishing we can't copy if there is filtered content
1971 1971 return not self.filtered(b'visible').changelog.filteredrevs
1972 1972
1973 1973 def shared(self):
1974 1974 '''the type of shared repository (None if not shared)'''
1975 1975 if self.sharedpath != self.path:
1976 1976 return b'store'
1977 1977 return None
1978 1978
1979 1979 def wjoin(self, f, *insidef):
1980 1980 return self.vfs.reljoin(self.root, f, *insidef)
1981 1981
1982 1982 def setparents(self, p1, p2=nullid):
1983 1983 self[None].setparents(p1, p2)
1984 1984 self._quick_access_changeid_invalidate()
1985 1985
1986 1986 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1987 1987 """changeid must be a changeset revision, if specified.
1988 1988 fileid can be a file revision or node."""
1989 1989 return context.filectx(
1990 1990 self, path, changeid, fileid, changectx=changectx
1991 1991 )
1992 1992
1993 1993 def getcwd(self):
1994 1994 return self.dirstate.getcwd()
1995 1995
1996 1996 def pathto(self, f, cwd=None):
1997 1997 return self.dirstate.pathto(f, cwd)
1998 1998
1999 1999 def _loadfilter(self, filter):
2000 2000 if filter not in self._filterpats:
2001 2001 l = []
2002 2002 for pat, cmd in self.ui.configitems(filter):
2003 2003 if cmd == b'!':
2004 2004 continue
2005 2005 mf = matchmod.match(self.root, b'', [pat])
2006 2006 fn = None
2007 2007 params = cmd
2008 2008 for name, filterfn in pycompat.iteritems(self._datafilters):
2009 2009 if cmd.startswith(name):
2010 2010 fn = filterfn
2011 2011 params = cmd[len(name) :].lstrip()
2012 2012 break
2013 2013 if not fn:
2014 2014 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2015 2015 fn.__name__ = 'commandfilter'
2016 2016 # Wrap old filters not supporting keyword arguments
2017 2017 if not pycompat.getargspec(fn)[2]:
2018 2018 oldfn = fn
2019 2019 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2020 2020 fn.__name__ = 'compat-' + oldfn.__name__
2021 2021 l.append((mf, fn, params))
2022 2022 self._filterpats[filter] = l
2023 2023 return self._filterpats[filter]
2024 2024
2025 2025 def _filter(self, filterpats, filename, data):
2026 2026 for mf, fn, cmd in filterpats:
2027 2027 if mf(filename):
2028 2028 self.ui.debug(
2029 2029 b"filtering %s through %s\n"
2030 2030 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2031 2031 )
2032 2032 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2033 2033 break
2034 2034
2035 2035 return data
2036 2036
2037 2037 @unfilteredpropertycache
2038 2038 def _encodefilterpats(self):
2039 2039 return self._loadfilter(b'encode')
2040 2040
2041 2041 @unfilteredpropertycache
2042 2042 def _decodefilterpats(self):
2043 2043 return self._loadfilter(b'decode')
2044 2044
2045 2045 def adddatafilter(self, name, filter):
2046 2046 self._datafilters[name] = filter
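# For illustration, a sketch of how an extension could register a data
# filter (the b'upper:' name and the function are hypothetical):
#
#   def upperfilter(s, cmd, **kwargs):
#       return s.upper()
#   repo.adddatafilter(b'upper:', upperfilter)
#
# An [encode] or [decode] rule whose command starts with b'upper:'
# would then be routed through the function by _loadfilter() above
# instead of being run as a shell command filter.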
2047 2047
2048 2048 def wread(self, filename):
2049 2049 if self.wvfs.islink(filename):
2050 2050 data = self.wvfs.readlink(filename)
2051 2051 else:
2052 2052 data = self.wvfs.read(filename)
2053 2053 return self._filter(self._encodefilterpats, filename, data)
2054 2054
2055 2055 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2056 2056 """write ``data`` into ``filename`` in the working directory
2057 2057
2058 2058 This returns the length of the written (maybe decoded) data.
2059 2059 """
2060 2060 data = self._filter(self._decodefilterpats, filename, data)
2061 2061 if b'l' in flags:
2062 2062 self.wvfs.symlink(data, filename)
2063 2063 else:
2064 2064 self.wvfs.write(
2065 2065 filename, data, backgroundclose=backgroundclose, **kwargs
2066 2066 )
2067 2067 if b'x' in flags:
2068 2068 self.wvfs.setflags(filename, False, True)
2069 2069 else:
2070 2070 self.wvfs.setflags(filename, False, False)
2071 2071 return len(data)
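# For example (a sketch): wwrite(b'bin/run', data, b'x') pushes the
# data through the decode filters, writes it, and marks the file
# executable, while flags b'l' writes the (decoded) data as a symlink
# target instead of regular file content.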
2072 2072
2073 2073 def wwritedata(self, filename, data):
2074 2074 return self._filter(self._decodefilterpats, filename, data)
2075 2075
2076 2076 def currenttransaction(self):
2077 2077 """return the current transaction or None if non exists"""
2078 2078 if self._transref:
2079 2079 tr = self._transref()
2080 2080 else:
2081 2081 tr = None
2082 2082
2083 2083 if tr and tr.running():
2084 2084 return tr
2085 2085 return None
2086 2086
2087 2087 def transaction(self, desc, report=None):
2088 2088 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2089 2089 b'devel', b'check-locks'
2090 2090 ):
2091 2091 if self._currentlock(self._lockref) is None:
2092 2092 raise error.ProgrammingError(b'transaction requires locking')
2093 2093 tr = self.currenttransaction()
2094 2094 if tr is not None:
2095 2095 return tr.nest(name=desc)
2096 2096
2097 2097 # abort here if the journal already exists
2098 2098 if self.svfs.exists(b"journal"):
2099 2099 raise error.RepoError(
2100 2100 _(b"abandoned transaction found"),
2101 2101 hint=_(b"run 'hg recover' to clean up transaction"),
2102 2102 )
2103 2103
2104 2104 idbase = b"%.40f#%f" % (random.random(), time.time())
2105 2105 ha = hex(hashutil.sha1(idbase).digest())
2106 2106 txnid = b'TXN:' + ha
2107 2107 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2108 2108
2109 2109 self._writejournal(desc)
2110 2110 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2111 2111 if report:
2112 2112 rp = report
2113 2113 else:
2114 2114 rp = self.ui.warn
2115 2115 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2116 2116 # we must avoid a cyclic reference between repo and transaction.
2117 2117 reporef = weakref.ref(self)
2118 2118 # Code to track tag movement
2119 2119 #
2120 2120 # Since tags are all handled as file content, it is actually quite hard
2121 2121 # to track their movement from a code perspective. So we fall back to
2122 2122 # tracking at the repository level. One could envision tracking changes
2123 2123 # to the '.hgtags' file through changegroup application, but that fails
2124 2124 # to cope with cases where a transaction exposes new heads without a
2125 2125 # changegroup being involved (e.g. phase movement).
2126 2126 #
2127 2127 # For now, we gate the feature behind a flag since it likely comes
2128 2128 # with performance impacts. The current code runs more often than
2129 2129 # needed and does not use caches as much as it could. The current focus
2130 2130 # is on the behavior of the feature, so we disable it by default. The
2131 2131 # flag will be removed when we are happy with the performance impact.
2132 2132 #
2133 2133 # Once this feature is no longer experimental move the following
2134 2134 # documentation to the appropriate help section:
2135 2135 #
2136 2136 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2137 2137 # tags (new, changed, or deleted tags). In addition, the details of
2138 2138 # these changes are made available in a file at:
2139 2139 # ``REPOROOT/.hg/changes/tags.changes``.
2140 2140 # Make sure you check for HG_TAG_MOVED before reading that file, as it
2141 2141 # might exist from a previous transaction even if no tags were touched
2142 2142 # in this one. Changes are recorded in a line-based format::
2143 2143 #
2144 2144 # <action> <hex-node> <tag-name>\n
2145 2145 #
2146 2146 # Actions are defined as follows:
2147 2147 # "-R": tag is removed,
2148 2148 # "+A": tag is added,
2149 2149 # "-M": tag is moved (old value),
2150 2150 # "+M": tag is moved (new value),
2151 2151 tracktags = lambda x: None
2152 2152 # experimental config: experimental.hook-track-tags
2153 2153 shouldtracktags = self.ui.configbool(
2154 2154 b'experimental', b'hook-track-tags'
2155 2155 )
2156 2156 if desc != b'strip' and shouldtracktags:
2157 2157 oldheads = self.changelog.headrevs()
2158 2158
2159 2159 def tracktags(tr2):
2160 2160 repo = reporef()
2161 2161 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2162 2162 newheads = repo.changelog.headrevs()
2163 2163 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2164 2164 # note: we compare lists here;
2165 2165 # as we do it only once, building a set would not be cheaper
2166 2166 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2167 2167 if changes:
2168 2168 tr2.hookargs[b'tag_moved'] = b'1'
2169 2169 with repo.vfs(
2170 2170 b'changes/tags.changes', b'w', atomictemp=True
2171 2171 ) as changesfile:
2172 2172 # note: we do not register the file with the transaction
2173 2173 # because we need it to still exist when the transaction
2174 2174 # is closed (for txnclose hooks)
2175 2175 tagsmod.writediff(changesfile, changes)
2176 2176
2177 2177 def validate(tr2):
2178 2178 """will run pre-closing hooks"""
2179 2179 # XXX the transaction API is a bit lacking here so we take a hacky
2180 2180 # path for now
2181 2181 #
2182 2182 # We cannot add these as "pending" hooks since the 'tr.hookargs'
2183 2183 # dict is copied before these run. In addition, we need the data
2184 2184 # available to in-memory hooks too.
2185 2185 #
2186 2186 # Moreover, we also need to make sure this runs before txnclose
2187 2187 # hooks and there is no "pending" mechanism that would execute
2188 2188 # logic only if hooks are about to run.
2189 2189 #
2190 2190 # Fixing this limitation of the transaction is also needed to track
2191 2191 # other families of changes (bookmarks, phases, obsolescence).
2192 2192 #
2193 2193 # This will have to be fixed before we remove the experimental
2194 2194 # gating.
2195 2195 tracktags(tr2)
2196 2196 repo = reporef()
2197 2197
2198 2198 singleheadopt = (b'experimental', b'single-head-per-branch')
2199 2199 singlehead = repo.ui.configbool(*singleheadopt)
2200 2200 if singlehead:
2201 2201 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2202 2202 accountclosed = singleheadsub.get(
2203 2203 b"account-closed-heads", False
2204 2204 )
2205 2205 scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
2206 2206 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2207 2207 for name, (old, new) in sorted(
2208 2208 tr.changes[b'bookmarks'].items()
2209 2209 ):
2210 2210 args = tr.hookargs.copy()
2211 2211 args.update(bookmarks.preparehookargs(name, old, new))
2212 2212 repo.hook(
2213 2213 b'pretxnclose-bookmark',
2214 2214 throw=True,
2215 2215 **pycompat.strkwargs(args)
2216 2216 )
2217 2217 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2218 2218 cl = repo.unfiltered().changelog
2219 2219 for revs, (old, new) in tr.changes[b'phases']:
2220 2220 for rev in revs:
2221 2221 args = tr.hookargs.copy()
2222 2222 node = hex(cl.node(rev))
2223 2223 args.update(phases.preparehookargs(node, old, new))
2224 2224 repo.hook(
2225 2225 b'pretxnclose-phase',
2226 2226 throw=True,
2227 2227 **pycompat.strkwargs(args)
2228 2228 )
2229 2229
2230 2230 repo.hook(
2231 2231 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2232 2232 )
2233 2233
2234 2234 def releasefn(tr, success):
2235 2235 repo = reporef()
2236 2236 if repo is None:
2237 2237 # If the repo has been GC'd (and this release function is being
2238 2238 # called from transaction.__del__), there's not much we can do,
2239 2239 # so just leave the unfinished transaction there and let the
2240 2240 # user run `hg recover`.
2241 2241 return
2242 2242 if success:
2243 2243 # this should be explicitly invoked here, because
2244 2244 # in-memory changes aren't written out when closing
2245 2245 # the transaction, if tr.addfilegenerator (via
2246 2246 # dirstate.write or so) wasn't invoked while the
2247 2247 # transaction was running
2248 2248 repo.dirstate.write(None)
2249 2249 else:
2250 2250 # discard all changes (including ones already written
2251 2251 # out) in this transaction
2252 2252 narrowspec.restorebackup(self, b'journal.narrowspec')
2253 2253 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2254 2254 repo.dirstate.restorebackup(None, b'journal.dirstate')
2255 2255
2256 2256 repo.invalidate(clearfilecache=True)
2257 2257
2258 2258 tr = transaction.transaction(
2259 2259 rp,
2260 2260 self.svfs,
2261 2261 vfsmap,
2262 2262 b"journal",
2263 2263 b"undo",
2264 2264 aftertrans(renames),
2265 2265 self.store.createmode,
2266 2266 validator=validate,
2267 2267 releasefn=releasefn,
2268 2268 checkambigfiles=_cachedfiles,
2269 2269 name=desc,
2270 2270 )
2271 2271 tr.changes[b'origrepolen'] = len(self)
2272 2272 tr.changes[b'obsmarkers'] = set()
2273 2273 tr.changes[b'phases'] = []
2274 2274 tr.changes[b'bookmarks'] = {}
2275 2275
2276 2276 tr.hookargs[b'txnid'] = txnid
2277 2277 tr.hookargs[b'txnname'] = desc
2278 2278 tr.hookargs[b'changes'] = tr.changes
2279 2279 # note: writing the fncache only during finalize means that the file is
2280 2280 # outdated when running hooks. As fncache is used for streaming clones,
2281 2281 # this is not expected to break anything that happens during the hooks.
2282 2282 tr.addfinalize(b'flush-fncache', self.store.write)
2283 2283
2284 2284 def txnclosehook(tr2):
2285 2285 """To be run if transaction is successful, will schedule a hook run
2286 2286 """
2287 2287 # Don't reference tr2 in hook() so we don't hold a reference.
2288 2288 # This reduces memory consumption when there are multiple
2289 2289 # transactions per lock. This can likely go away if issue5045
2290 2290 # fixes the function accumulation.
2291 2291 hookargs = tr2.hookargs
2292 2292
2293 2293 def hookfunc(unused_success):
2294 2294 repo = reporef()
2295 2295 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2296 2296 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2297 2297 for name, (old, new) in bmchanges:
2298 2298 args = tr.hookargs.copy()
2299 2299 args.update(bookmarks.preparehookargs(name, old, new))
2300 2300 repo.hook(
2301 2301 b'txnclose-bookmark',
2302 2302 throw=False,
2303 2303 **pycompat.strkwargs(args)
2304 2304 )
2305 2305
2306 2306 if hook.hashook(repo.ui, b'txnclose-phase'):
2307 2307 cl = repo.unfiltered().changelog
2308 2308 phasemv = sorted(
2309 2309 tr.changes[b'phases'], key=lambda r: r[0][0]
2310 2310 )
2311 2311 for revs, (old, new) in phasemv:
2312 2312 for rev in revs:
2313 2313 args = tr.hookargs.copy()
2314 2314 node = hex(cl.node(rev))
2315 2315 args.update(phases.preparehookargs(node, old, new))
2316 2316 repo.hook(
2317 2317 b'txnclose-phase',
2318 2318 throw=False,
2319 2319 **pycompat.strkwargs(args)
2320 2320 )
2321 2321
2322 2322 repo.hook(
2323 2323 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2324 2324 )
2325 2325
2326 2326 reporef()._afterlock(hookfunc)
2327 2327
2328 2328 tr.addfinalize(b'txnclose-hook', txnclosehook)
2329 2329 # Include a leading "-" to make it happen before the transaction summary
2330 2330 # reports registered via scmutil.registersummarycallback() whose names
2331 2331 # are 00-txnreport etc. That way, the caches will be warm when the
2332 2332 # callbacks run.
2333 2333 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2334 2334
2335 2335 def txnaborthook(tr2):
2336 2336 """To be run if transaction is aborted
2337 2337 """
2338 2338 reporef().hook(
2339 2339 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2340 2340 )
2341 2341
2342 2342 tr.addabort(b'txnabort-hook', txnaborthook)
2343 2343 # avoid eager cache invalidation. in-memory data should be identical
2344 2344 # to stored data if transaction has no error.
2345 2345 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2346 2346 self._transref = weakref.ref(tr)
2347 2347 scmutil.registersummarycallback(self, tr, desc)
2348 2348 return tr
2349 2349
2350 2350 def _journalfiles(self):
2351 2351 return (
2352 2352 (self.svfs, b'journal'),
2353 2353 (self.svfs, b'journal.narrowspec'),
2354 2354 (self.vfs, b'journal.narrowspec.dirstate'),
2355 2355 (self.vfs, b'journal.dirstate'),
2356 2356 (self.vfs, b'journal.branch'),
2357 2357 (self.vfs, b'journal.desc'),
2358 2358 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2359 2359 (self.svfs, b'journal.phaseroots'),
2360 2360 )
2361 2361
2362 2362 def undofiles(self):
2363 2363 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2364 2364
2365 2365 @unfilteredmethod
2366 2366 def _writejournal(self, desc):
2367 2367 self.dirstate.savebackup(None, b'journal.dirstate')
2368 2368 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2369 2369 narrowspec.savebackup(self, b'journal.narrowspec')
2370 2370 self.vfs.write(
2371 2371 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2372 2372 )
2373 2373 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2374 2374 bookmarksvfs = bookmarks.bookmarksvfs(self)
2375 2375 bookmarksvfs.write(
2376 2376 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2377 2377 )
2378 2378 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2379 2379
2380 2380 def recover(self):
2381 2381 with self.lock():
2382 2382 if self.svfs.exists(b"journal"):
2383 2383 self.ui.status(_(b"rolling back interrupted transaction\n"))
2384 2384 vfsmap = {
2385 2385 b'': self.svfs,
2386 2386 b'plain': self.vfs,
2387 2387 }
2388 2388 transaction.rollback(
2389 2389 self.svfs,
2390 2390 vfsmap,
2391 2391 b"journal",
2392 2392 self.ui.warn,
2393 2393 checkambigfiles=_cachedfiles,
2394 2394 )
2395 2395 self.invalidate()
2396 2396 return True
2397 2397 else:
2398 2398 self.ui.warn(_(b"no interrupted transaction available\n"))
2399 2399 return False
2400 2400
2401 2401 def rollback(self, dryrun=False, force=False):
2402 2402 wlock = lock = dsguard = None
2403 2403 try:
2404 2404 wlock = self.wlock()
2405 2405 lock = self.lock()
2406 2406 if self.svfs.exists(b"undo"):
2407 2407 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2408 2408
2409 2409 return self._rollback(dryrun, force, dsguard)
2410 2410 else:
2411 2411 self.ui.warn(_(b"no rollback information available\n"))
2412 2412 return 1
2413 2413 finally:
2414 2414 release(dsguard, lock, wlock)
2415 2415
2416 2416 @unfilteredmethod # Until we get smarter cache management
2417 2417 def _rollback(self, dryrun, force, dsguard):
2418 2418 ui = self.ui
2419 2419 try:
2420 2420 args = self.vfs.read(b'undo.desc').splitlines()
2421 2421 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2422 2422 if len(args) >= 3:
2423 2423 detail = args[2]
2424 2424 oldtip = oldlen - 1
2425 2425
2426 2426 if detail and ui.verbose:
2427 2427 msg = _(
2428 2428 b'repository tip rolled back to revision %d'
2429 2429 b' (undo %s: %s)\n'
2430 2430 ) % (oldtip, desc, detail)
2431 2431 else:
2432 2432 msg = _(
2433 2433 b'repository tip rolled back to revision %d (undo %s)\n'
2434 2434 ) % (oldtip, desc)
2435 2435 except IOError:
2436 2436 msg = _(b'rolling back unknown transaction\n')
2437 2437 desc = None
2438 2438
2439 2439 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2440 2440 raise error.Abort(
2441 2441 _(
2442 2442 b'rollback of last commit while not checked out '
2443 2443 b'may lose data'
2444 2444 ),
2445 2445 hint=_(b'use -f to force'),
2446 2446 )
2447 2447
2448 2448 ui.status(msg)
2449 2449 if dryrun:
2450 2450 return 0
2451 2451
2452 2452 parents = self.dirstate.parents()
2453 2453 self.destroying()
2454 2454 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2455 2455 transaction.rollback(
2456 2456 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2457 2457 )
2458 2458 bookmarksvfs = bookmarks.bookmarksvfs(self)
2459 2459 if bookmarksvfs.exists(b'undo.bookmarks'):
2460 2460 bookmarksvfs.rename(
2461 2461 b'undo.bookmarks', b'bookmarks', checkambig=True
2462 2462 )
2463 2463 if self.svfs.exists(b'undo.phaseroots'):
2464 2464 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2465 2465 self.invalidate()
2466 2466
2467 2467 has_node = self.changelog.index.has_node
2468 2468 parentgone = any(not has_node(p) for p in parents)
2469 2469 if parentgone:
2470 2470 # prevent dirstateguard from overwriting the already restored one
2471 2471 dsguard.close()
2472 2472
2473 2473 narrowspec.restorebackup(self, b'undo.narrowspec')
2474 2474 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2475 2475 self.dirstate.restorebackup(None, b'undo.dirstate')
2476 2476 try:
2477 2477 branch = self.vfs.read(b'undo.branch')
2478 2478 self.dirstate.setbranch(encoding.tolocal(branch))
2479 2479 except IOError:
2480 2480 ui.warn(
2481 2481 _(
2482 2482 b'named branch could not be reset: '
2483 2483 b'current branch is still \'%s\'\n'
2484 2484 )
2485 2485 % self.dirstate.branch()
2486 2486 )
2487 2487
2488 2488 parents = tuple([p.rev() for p in self[None].parents()])
2489 2489 if len(parents) > 1:
2490 2490 ui.status(
2491 2491 _(
2492 2492 b'working directory now based on '
2493 2493 b'revisions %d and %d\n'
2494 2494 )
2495 2495 % parents
2496 2496 )
2497 2497 else:
2498 2498 ui.status(
2499 2499 _(b'working directory now based on revision %d\n') % parents
2500 2500 )
2501 2501 mergestatemod.mergestate.clean(self, self[b'.'].node())
2502 2502
2503 2503 # TODO: if we know which new heads may result from this rollback, pass
2504 2504 # them to destroy(), which will prevent the branchhead cache from being
2505 2505 # invalidated.
2506 2506 self.destroyed()
2507 2507 return 0
2508 2508
2509 2509 def _buildcacheupdater(self, newtransaction):
2510 2510 """called during transaction to build the callback updating cache
2511 2511
2512 2512 Lives on the repository to help extension who might want to augment
2513 2513 this logic. For this purpose, the created transaction is passed to the
2514 2514 method.
2515 2515 """
2516 2516 # we must avoid a cyclic reference between repo and transaction.
2517 2517 reporef = weakref.ref(self)
2518 2518
2519 2519 def updater(tr):
2520 2520 repo = reporef()
2521 2521 repo.updatecaches(tr)
2522 2522
2523 2523 return updater
2524 2524
2525 2525 @unfilteredmethod
2526 2526 def updatecaches(self, tr=None, full=False):
2527 2527 """warm appropriate caches
2528 2528
2529 2529 If this function is called after a transaction has closed, the
2530 2530 transaction will be available in the 'tr' argument. This can be used to
2531 2531 selectively update caches relevant to the changes in that transaction.
2532 2532
2533 2533 If 'full' is set, make sure all caches the function knows about have
2534 2534 up-to-date data, even the ones usually loaded more lazily.
2535 2535 """
2536 2536 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2537 2537 # During strip, many caches are invalid but
2538 2538 # later call to `destroyed` will refresh them.
2539 2539 return
2540 2540
2541 2541 if tr is None or tr.changes[b'origrepolen'] < len(self):
2542 2542 # accessing the 'served' branchmap should refresh all the others,
2543 2543 self.ui.debug(b'updating the branch cache\n')
2544 2544 self.filtered(b'served').branchmap()
2545 2545 self.filtered(b'served.hidden').branchmap()
2546 2546
2547 2547 if full:
2548 2548 unfi = self.unfiltered()
2549 2549
2550 2550 self.changelog.update_caches(transaction=tr)
2551 2551 self.manifestlog.update_caches(transaction=tr)
2552 2552
2553 2553 rbc = unfi.revbranchcache()
2554 2554 for r in unfi.changelog:
2555 2555 rbc.branchinfo(r)
2556 2556 rbc.write()
2557 2557
2558 2558 # ensure the working copy parents are in the manifestfulltextcache
2559 2559 for ctx in self[b'.'].parents():
2560 2560 ctx.manifest() # accessing the manifest is enough
2561 2561
2562 2562 # accessing fnode cache warms the cache
2563 2563 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2564 2564 # accessing tags warms the cache
2565 2565 self.tags()
2566 2566 self.filtered(b'served').tags()
2567 2567
2568 2568 # The `full` arg is documented as updating even the lazily-loaded
2569 2569 # caches immediately, so we're forcing a write to cause these caches
2570 2570 # to be warmed up even if they haven't explicitly been requested
2571 2571 # yet (if they've never been used by hg, they won't ever have been
2572 2572 # written, even if they're a subset of another kind of cache that
2573 2573 # *has* been used).
2574 2574 for filt in repoview.filtertable.keys():
2575 2575 filtered = self.filtered(filt)
2576 2576 filtered.branchmap().write(filtered)
2577 2577
2578 2578 def invalidatecaches(self):
2579 2579
2580 2580 if '_tagscache' in vars(self):
2581 2581 # can't use delattr on proxy
2582 2582 del self.__dict__['_tagscache']
2583 2583
2584 2584 self._branchcaches.clear()
2585 2585 self.invalidatevolatilesets()
2586 2586 self._sparsesignaturecache.clear()
2587 2587
2588 2588 def invalidatevolatilesets(self):
2589 2589 self.filteredrevcache.clear()
2590 2590 obsolete.clearobscaches(self)
2591 2591 self._quick_access_changeid_invalidate()
2592 2592
2593 2593 def invalidatedirstate(self):
2594 2594 '''Invalidates the dirstate, causing the next call to dirstate
2595 2595 to check if it was modified since the last time it was read,
2596 2596 rereading it if it has.
2597 2597
2598 2598 This is different from dirstate.invalidate() in that it doesn't always
2599 2599 reread the dirstate. Use dirstate.invalidate() if you want to
2600 2600 explicitly read the dirstate again (i.e. restoring it to a previous
2601 2601 known good state).'''
2602 2602 if hasunfilteredcache(self, 'dirstate'):
2603 2603 for k in self.dirstate._filecache:
2604 2604 try:
2605 2605 delattr(self.dirstate, k)
2606 2606 except AttributeError:
2607 2607 pass
2608 2608 delattr(self.unfiltered(), 'dirstate')
2609 2609
2610 2610 def invalidate(self, clearfilecache=False):
2611 2611 '''Invalidates both store and non-store parts other than dirstate
2612 2612
2613 2613 If a transaction is running, invalidation of store is omitted,
2614 2614 because discarding in-memory changes might cause inconsistency
2615 2615 (e.g. an incomplete fncache causes unintentional failure, but a
2616 2616 redundant one doesn't).
2617 2617 '''
2618 2618 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2619 2619 for k in list(self._filecache.keys()):
2620 2620 # dirstate is invalidated separately in invalidatedirstate()
2621 2621 if k == b'dirstate':
2622 2622 continue
2623 2623 if (
2624 2624 k == b'changelog'
2625 2625 and self.currenttransaction()
2626 2626 and self.changelog._delayed
2627 2627 ):
2628 2628 # The changelog object may store unwritten revisions. We don't
2629 2629 # want to lose them.
2630 2630 # TODO: Solve the problem instead of working around it.
2631 2631 continue
2632 2632
2633 2633 if clearfilecache:
2634 2634 del self._filecache[k]
2635 2635 try:
2636 2636 delattr(unfiltered, k)
2637 2637 except AttributeError:
2638 2638 pass
2639 2639 self.invalidatecaches()
2640 2640 if not self.currenttransaction():
2641 2641 # TODO: Changing contents of store outside transaction
2642 2642 # causes inconsistency. We should make in-memory store
2643 2643 # changes detectable, and abort if changed.
2644 2644 self.store.invalidatecaches()
2645 2645
2646 2646 def invalidateall(self):
2647 2647 '''Fully invalidates both store and non-store parts, causing the
2648 2648 subsequent operation to reread any outside changes.'''
2649 2649 # extension should hook this to invalidate its caches
2650 2650 self.invalidate()
2651 2651 self.invalidatedirstate()
2652 2652
2653 2653 @unfilteredmethod
2654 2654 def _refreshfilecachestats(self, tr):
2655 2655 """Reload stats of cached files so that they are flagged as valid"""
2656 2656 for k, ce in self._filecache.items():
2657 2657 k = pycompat.sysstr(k)
2658 2658 if k == 'dirstate' or k not in self.__dict__:
2659 2659 continue
2660 2660 ce.refresh()
2661 2661
2662 2662 def _lock(
2663 2663 self,
2664 2664 vfs,
2665 2665 lockname,
2666 2666 wait,
2667 2667 releasefn,
2668 2668 acquirefn,
2669 2669 desc,
2670 2670 inheritchecker=None,
2671 2671 parentenvvar=None,
2672 2672 ):
2673 2673 parentlock = None
2674 2674 # the contents of parentenvvar are used by the underlying lock to
2675 2675 # determine whether it can be inherited
2676 2676 if parentenvvar is not None:
2677 2677 parentlock = encoding.environ.get(parentenvvar)
2678 2678
2679 2679 timeout = 0
2680 2680 warntimeout = 0
2681 2681 if wait:
2682 2682 timeout = self.ui.configint(b"ui", b"timeout")
2683 2683 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2684 2684 # internal config: ui.signal-safe-lock
2685 2685 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2686 2686
2687 2687 l = lockmod.trylock(
2688 2688 self.ui,
2689 2689 vfs,
2690 2690 lockname,
2691 2691 timeout,
2692 2692 warntimeout,
2693 2693 releasefn=releasefn,
2694 2694 acquirefn=acquirefn,
2695 2695 desc=desc,
2696 2696 inheritchecker=inheritchecker,
2697 2697 parentlock=parentlock,
2698 2698 signalsafe=signalsafe,
2699 2699 )
2700 2700 return l
2701 2701
2702 2702 def _afterlock(self, callback):
2703 2703 """add a callback to be run when the repository is fully unlocked
2704 2704
2705 2705 The callback will be executed when the outermost lock is released
2706 2706 (with wlock being higher level than 'lock')."""
2707 2707 for ref in (self._wlockref, self._lockref):
2708 2708 l = ref and ref()
2709 2709 if l and l.held:
2710 2710 l.postrelease.append(callback)
2711 2711 break
2712 2712 else: # no lock has been found.
2713 2713 callback(True)
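# Minimal sketch of registering an after-lock callback, assuming an
# existing ``repo``. Callbacks receive a single success boolean, as the
# ``callback(True)`` fallback above shows; when a lock is held, they run
# on release of the outermost lock instead.
#
#     def _notify(success):
#         repo.ui.debug(b'all locks released, success=%r\n' % success)
#
#     repo._afterlock(_notify)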
2714 2714
2715 2715 def lock(self, wait=True):
2716 2716 '''Lock the repository store (.hg/store) and return a weak reference
2717 2717 to the lock. Use this before modifying the store (e.g. committing or
2718 2718 stripping). If you are opening a transaction, get a lock as well.
2719 2719
2720 2720 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2721 2721 'wlock' first to avoid a dead-lock hazard.'''
2722 2722 l = self._currentlock(self._lockref)
2723 2723 if l is not None:
2724 2724 l.lock()
2725 2725 return l
2726 2726
2727 2727 l = self._lock(
2728 2728 vfs=self.svfs,
2729 2729 lockname=b"lock",
2730 2730 wait=wait,
2731 2731 releasefn=None,
2732 2732 acquirefn=self.invalidate,
2733 2733 desc=_(b'repository %s') % self.origroot,
2734 2734 )
2735 2735 self._lockref = weakref.ref(l)
2736 2736 return l
2737 2737
2738 2738 def _wlockchecktransaction(self):
2739 2739 if self.currenttransaction() is not None:
2740 2740 raise error.LockInheritanceContractViolation(
2741 2741 b'wlock cannot be inherited in the middle of a transaction'
2742 2742 )
2743 2743
2744 2744 def wlock(self, wait=True):
2745 2745 '''Lock the non-store parts of the repository (everything under
2746 2746 .hg except .hg/store) and return a weak reference to the lock.
2747 2747
2748 2748 Use this before modifying files in .hg.
2749 2749
2750 2750 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2751 2751 'wlock' first to avoid a dead-lock hazard.'''
2752 2752 l = self._wlockref and self._wlockref()
2753 2753 if l is not None and l.held:
2754 2754 l.lock()
2755 2755 return l
2756 2756
2757 2757 # We do not need to check for non-waiting lock acquisition. Such
2758 2758 # acquisition would not cause a dead-lock, as it would just fail.
2759 2759 if wait and (
2760 2760 self.ui.configbool(b'devel', b'all-warnings')
2761 2761 or self.ui.configbool(b'devel', b'check-locks')
2762 2762 ):
2763 2763 if self._currentlock(self._lockref) is not None:
2764 2764 self.ui.develwarn(b'"wlock" acquired after "lock"')
2765 2765
2766 2766 def unlock():
2767 2767 if self.dirstate.pendingparentchange():
2768 2768 self.dirstate.invalidate()
2769 2769 else:
2770 2770 self.dirstate.write(None)
2771 2771
2772 2772 self._filecache[b'dirstate'].refresh()
2773 2773
2774 2774 l = self._lock(
2775 2775 self.vfs,
2776 2776 b"wlock",
2777 2777 wait,
2778 2778 unlock,
2779 2779 self.invalidatedirstate,
2780 2780 _(b'working directory of %s') % self.origroot,
2781 2781 inheritchecker=self._wlockchecktransaction,
2782 2782 parentenvvar=b'HG_WLOCK_LOCKER',
2783 2783 )
2784 2784 self._wlockref = weakref.ref(l)
2785 2785 return l
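# Sketch of the lock-ordering contract documented above, assuming an
# existing ``repo``: take 'wlock' before 'lock' to avoid the dead-lock
# hazard (the reverse order can trigger the devel warning checked above).
#
#     with repo.wlock(), repo.lock():
#         pass  # safe to modify both .hg and .hg/store here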
2786 2786
2787 2787 def _currentlock(self, lockref):
2788 2788 """Returns the lock if it's held, or None if it's not."""
2789 2789 if lockref is None:
2790 2790 return None
2791 2791 l = lockref()
2792 2792 if l is None or not l.held:
2793 2793 return None
2794 2794 return l
2795 2795
2796 2796 def currentwlock(self):
2797 2797 """Returns the wlock if it's held, or None if it's not."""
2798 2798 return self._currentlock(self._wlockref)
2799 2799
2800 2800 def checkcommitpatterns(self, wctx, match, status, fail):
2801 2801 """check for commit arguments that aren't committable"""
2802 2802 if match.isexact() or match.prefix():
2803 2803 matched = set(status.modified + status.added + status.removed)
2804 2804
2805 2805 for f in match.files():
2806 2806 f = self.dirstate.normalize(f)
2807 2807 if f == b'.' or f in matched or f in wctx.substate:
2808 2808 continue
2809 2809 if f in status.deleted:
2810 2810 fail(f, _(b'file not found!'))
2811 2811 # Is it a directory that exists or used to exist?
2812 2812 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2813 2813 d = f + b'/'
2814 2814 for mf in matched:
2815 2815 if mf.startswith(d):
2816 2816 break
2817 2817 else:
2818 2818 fail(f, _(b"no match under directory!"))
2819 2819 elif f not in self.dirstate:
2820 2820 fail(f, _(b"file not tracked!"))
2821 2821
2822 2822 @unfilteredmethod
2823 2823 def commit(
2824 2824 self,
2825 2825 text=b"",
2826 2826 user=None,
2827 2827 date=None,
2828 2828 match=None,
2829 2829 force=False,
2830 2830 editor=None,
2831 2831 extra=None,
2832 2832 ):
2833 2833 """Add a new revision to current repository.
2834 2834
2835 2835 Revision information is gathered from the working directory,
2836 2836 match can be used to filter the committed files. If editor is
2837 2837 supplied, it is called to get a commit message.
2838 2838 """
2839 2839 if extra is None:
2840 2840 extra = {}
2841 2841
2842 2842 def fail(f, msg):
2843 2843 raise error.Abort(b'%s: %s' % (f, msg))
2844 2844
2845 2845 if not match:
2846 2846 match = matchmod.always()
2847 2847
2848 2848 if not force:
2849 2849 match.bad = fail
2850 2850
2851 2851 # lock() for recent changelog (see issue4368)
2852 2852 with self.wlock(), self.lock():
2853 2853 wctx = self[None]
2854 2854 merge = len(wctx.parents()) > 1
2855 2855
2856 2856 if not force and merge and not match.always():
2857 2857 raise error.Abort(
2858 2858 _(
2859 2859 b'cannot partially commit a merge '
2860 2860 b'(do not specify files or patterns)'
2861 2861 )
2862 2862 )
2863 2863
2864 2864 status = self.status(match=match, clean=force)
2865 2865 if force:
2866 2866 status.modified.extend(
2867 2867 status.clean
2868 2868 ) # mq may commit clean files
2869 2869
2870 2870 # check subrepos
2871 2871 subs, commitsubs, newstate = subrepoutil.precommit(
2872 2872 self.ui, wctx, status, match, force=force
2873 2873 )
2874 2874
2875 2875 # make sure all explicit patterns are matched
2876 2876 if not force:
2877 2877 self.checkcommitpatterns(wctx, match, status, fail)
2878 2878
2879 2879 cctx = context.workingcommitctx(
2880 2880 self, status, text, user, date, extra
2881 2881 )
2882 2882
2883 2883 ms = mergestatemod.mergestate.read(self)
2884 2884 mergeutil.checkunresolved(ms)
2885 2885
2886 2886 # internal config: ui.allowemptycommit
2887 2887 if cctx.isempty() and not self.ui.configbool(
2888 2888 b'ui', b'allowemptycommit'
2889 2889 ):
2890 2890 self.ui.debug(b'nothing to commit, clearing merge state\n')
2891 2891 ms.reset()
2892 2892 return None
2893 2893
2894 2894 if merge and cctx.deleted():
2895 2895 raise error.Abort(_(b"cannot commit merge with missing files"))
2896 2896
2897 2897 if editor:
2898 2898 cctx._text = editor(self, cctx, subs)
2899 2899 edited = text != cctx._text
2900 2900
2901 2901 # Save commit message in case this transaction gets rolled back
2902 2902 # (e.g. by a pretxncommit hook). Leave the content alone on
2903 2903 # the assumption that the user will use the same editor again.
2904 2904 msgfn = self.savecommitmessage(cctx._text)
2905 2905
2906 2906 # commit subs and write new state
2907 2907 if subs:
2908 2908 uipathfn = scmutil.getuipathfn(self)
2909 2909 for s in sorted(commitsubs):
2910 2910 sub = wctx.sub(s)
2911 2911 self.ui.status(
2912 2912 _(b'committing subrepository %s\n')
2913 2913 % uipathfn(subrepoutil.subrelpath(sub))
2914 2914 )
2915 2915 sr = sub.commit(cctx._text, user, date)
2916 2916 newstate[s] = (newstate[s][0], sr)
2917 2917 subrepoutil.writestate(self, newstate)
2918 2918
2919 2919 p1, p2 = self.dirstate.parents()
2920 2920 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
2921 2921 try:
2922 2922 self.hook(
2923 2923 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
2924 2924 )
2925 2925 with self.transaction(b'commit'):
2926 2926 ret = self.commitctx(cctx, True)
2927 2927 # update bookmarks, dirstate and mergestate
2928 2928 bookmarks.update(self, [p1, p2], ret)
2929 2929 cctx.markcommitted(ret)
2930 2930 ms.reset()
2931 2931 except: # re-raises
2932 2932 if edited:
2933 2933 self.ui.write(
2934 2934 _(b'note: commit message saved in %s\n') % msgfn
2935 2935 )
2936 2936 self.ui.write(
2937 2937 _(
2938 2938 b"note: use 'hg commit --logfile "
2939 2939 b".hg/last-message.txt --edit' to reuse it\n"
2940 2940 )
2941 2941 )
2942 2942 raise
2943 2943
2944 2944 def commithook(unused_success):
2945 2945 # hack for commands that use a temporary commit (e.g. histedit):
2946 2946 # the temporary commit may have been stripped before the hook runs
2947 2947 if self.changelog.hasnode(ret):
2948 2948 self.hook(
2949 2949 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
2950 2950 )
2951 2951
2952 2952 self._afterlock(commithook)
2953 2953 return ret
2954 2954
2955 2955 @unfilteredmethod
2956 2956 def commitctx(self, ctx, error=False, origctx=None):
2957 2957 return commit.commitctx(self, ctx, error=error, origctx=origctx)
2958 2958
2959 2959 @unfilteredmethod
2960 2960 def destroying(self):
2961 2961 '''Inform the repository that nodes are about to be destroyed.
2962 2962 Intended for use by strip and rollback, so there's a common
2963 2963 place for anything that has to be done before destroying history.
2964 2964
2965 2965 This is mostly useful for saving state that is in memory and waiting
2966 2966 to be flushed when the current lock is released. Because a call to
2967 2967 destroyed is imminent, the repo will be invalidated, causing those
2968 2968 changes to stay in memory (waiting for the next unlock) or vanish
2969 2969 completely.
2970 2970 '''
2971 2971 # When using the same lock to commit and strip, the phasecache is left
2972 2972 # dirty after committing. Then when we strip, the repo is invalidated,
2973 2973 # causing those changes to disappear.
2974 2974 if '_phasecache' in vars(self):
2975 2975 self._phasecache.write()
2976 2976
2977 2977 @unfilteredmethod
2978 2978 def destroyed(self):
2979 2979 '''Inform the repository that nodes have been destroyed.
2980 2980 Intended for use by strip and rollback, so there's a common
2981 2981 place for anything that has to be done after destroying history.
2982 2982 '''
2983 2983 # When one tries to:
2984 2984 # 1) destroy nodes thus calling this method (e.g. strip)
2985 2985 # 2) use phasecache somewhere (e.g. commit)
2986 2986 #
2987 2987 # then 2) will fail because the phasecache contains nodes that were
2988 2988 # removed. We can either remove phasecache from the filecache,
2989 2989 # causing it to reload next time it is accessed, or simply filter
2990 2990 # the removed nodes now and write the updated cache.
2991 2991 self._phasecache.filterunknown(self)
2992 2992 self._phasecache.write()
2993 2993
2994 2994 # refresh all repository caches
2995 2995 self.updatecaches()
2996 2996
2997 2997 # Ensure the persistent tag cache is updated. Doing it now
2998 2998 # means that the tag cache only has to worry about destroyed
2999 2999 # heads immediately after a strip/rollback. That in turn
3000 3000 # guarantees that "cachetip == currenttip" (comparing both rev
3001 3001 # and node) always means no nodes have been added or destroyed.
3002 3002
3003 3003 # XXX this is suboptimal when qrefresh'ing: we strip the current
3004 3004 # head, refresh the tag cache, then immediately add a new head.
3005 3005 # But I think doing it this way is necessary for the "instant
3006 3006 # tag cache retrieval" case to work.
3007 3007 self.invalidate()
3008 3008
3009 3009 def status(
3010 3010 self,
3011 3011 node1=b'.',
3012 3012 node2=None,
3013 3013 match=None,
3014 3014 ignored=False,
3015 3015 clean=False,
3016 3016 unknown=False,
3017 3017 listsubrepos=False,
3018 3018 ):
3019 3019 '''a convenience method that calls node1.status(node2)'''
3020 3020 return self[node1].status(
3021 3021 node2, match, ignored, clean, unknown, listsubrepos
3022 3022 )
3023 3023
3024 3024 def addpostdsstatus(self, ps):
3025 3025 """Add a callback to run within the wlock, at the point at which status
3026 3026 fixups happen.
3027 3027
3028 3028 On status completion, callback(wctx, status) will be called with the
3029 3029 wlock held, unless the dirstate has changed from underneath or the wlock
3030 3030 couldn't be grabbed.
3031 3031
3032 3032 Callbacks should not capture and use a cached copy of the dirstate --
3033 3033 it might change in the meanwhile. Instead, they should access the
3034 3034 dirstate via wctx.repo().dirstate.
3035 3035
3036 3036 This list is emptied out after each status run -- extensions should
3037 3037 make sure they add to this list each time dirstate.status is called.
3038 3038 Extensions should also make sure they don't call this for statuses
3039 3039 that don't involve the dirstate.
3040 3040 """
3041 3041
3042 3042 # The list is located here for uniqueness reasons -- it is actually
3043 3043 # managed by the workingctx, but that isn't unique per-repo.
3044 3044 self._postdsstatus.append(ps)
3045 3045
3046 3046 def postdsstatus(self):
3047 3047 """Used by workingctx to get the list of post-dirstate-status hooks."""
3048 3048 return self._postdsstatus
3049 3049
3050 3050 def clearpostdsstatus(self):
3051 3051 """Used by workingctx to clear post-dirstate-status hooks."""
3052 3052 del self._postdsstatus[:]
3053 3053
3054 3054 def heads(self, start=None):
3055 3055 if start is None:
3056 3056 cl = self.changelog
3057 3057 headrevs = reversed(cl.headrevs())
3058 3058 return [cl.node(rev) for rev in headrevs]
3059 3059
3060 3060 heads = self.changelog.heads(start)
3061 3061 # sort the output in rev descending order
3062 3062 return sorted(heads, key=self.changelog.rev, reverse=True)
3063 3063
3064 3064 def branchheads(self, branch=None, start=None, closed=False):
3065 3065 '''return a (possibly filtered) list of heads for the given branch
3066 3066
3067 3067 Heads are returned in topological order, from newest to oldest.
3068 3068 If branch is None, use the dirstate branch.
3069 3069 If start is not None, return only heads reachable from start.
3070 3070 If closed is True, return heads that are marked as closed as well.
3071 3071 '''
3072 3072 if branch is None:
3073 3073 branch = self[None].branch()
3074 3074 branches = self.branchmap()
3075 3075 if not branches.hasbranch(branch):
3076 3076 return []
3077 3077 # the cache returns heads ordered lowest to highest
3078 3078 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3079 3079 if start is not None:
3080 3080 # filter out the heads that cannot be reached from startrev
3081 3081 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3082 3082 bheads = [h for h in bheads if h in fbheads]
3083 3083 return bheads
3084 3084
3085 3085 def branches(self, nodes):
3086 3086 if not nodes:
3087 3087 nodes = [self.changelog.tip()]
3088 3088 b = []
3089 3089 for n in nodes:
3090 3090 t = n
3091 3091 while True:
3092 3092 p = self.changelog.parents(n)
3093 3093 if p[1] != nullid or p[0] == nullid:
3094 3094 b.append((t, n, p[0], p[1]))
3095 3095 break
3096 3096 n = p[0]
3097 3097 return b
3098 3098
3099 3099 def between(self, pairs):
3100 3100 r = []
3101 3101
3102 3102 for top, bottom in pairs:
3103 3103 n, l, i = top, [], 0
3104 3104 f = 1
3105 3105
3106 3106 while n != bottom and n != nullid:
3107 3107 p = self.changelog.parents(n)[0]
3108 3108 if i == f:
3109 3109 l.append(n)
3110 3110 f = f * 2
3111 3111 n = p
3112 3112 i += 1
3113 3113
3114 3114 r.append(l)
3115 3115
3116 3116 return r
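# The loop above records, for each (top, bottom) pair, the nodes at
# exponentially growing first-parent distances from ``top`` (steps 1,
# 2, 4, 8, ...). A hedged pure-Python sketch of the index selection:
#
#     def spacing(depth):
#         i, f, picks = 0, 1, []
#         while i < depth:
#             if i == f:
#                 picks.append(i)
#                 f *= 2
#             i += 1
#         return picks  # spacing(10) -> [1, 2, 4, 8]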
3117 3117
3118 3118 def checkpush(self, pushop):
3119 3119 """Extensions can override this function if additional checks have
3120 3120 to be performed before pushing, or call it if they override push
3121 3121 command.
3122 3122 """
3123 3123
3124 3124 @unfilteredpropertycache
3125 3125 def prepushoutgoinghooks(self):
3126 3126 """Return util.hooks consists of a pushop with repo, remote, outgoing
3127 3127 methods, which are called before pushing changesets.
3128 3128 """
3129 3129 return util.hooks()
3130 3130
3131 3131 def pushkey(self, namespace, key, old, new):
3132 3132 try:
3133 3133 tr = self.currenttransaction()
3134 3134 hookargs = {}
3135 3135 if tr is not None:
3136 3136 hookargs.update(tr.hookargs)
3137 3137 hookargs = pycompat.strkwargs(hookargs)
3138 3138 hookargs['namespace'] = namespace
3139 3139 hookargs['key'] = key
3140 3140 hookargs['old'] = old
3141 3141 hookargs['new'] = new
3142 3142 self.hook(b'prepushkey', throw=True, **hookargs)
3143 3143 except error.HookAbort as exc:
3144 3144 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3145 3145 if exc.hint:
3146 3146 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3147 3147 return False
3148 3148 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3149 3149 ret = pushkey.push(self, namespace, key, old, new)
3150 3150
3151 3151 def runhook(unused_success):
3152 3152 self.hook(
3153 3153 b'pushkey',
3154 3154 namespace=namespace,
3155 3155 key=key,
3156 3156 old=old,
3157 3157 new=new,
3158 3158 ret=ret,
3159 3159 )
3160 3160
3161 3161 self._afterlock(runhook)
3162 3162 return ret
3163 3163
3164 3164 def listkeys(self, namespace):
3165 3165 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3166 3166 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3167 3167 values = pushkey.list(self, namespace)
3168 3168 self.hook(b'listkeys', namespace=namespace, values=values)
3169 3169 return values
3170 3170
3171 3171 def debugwireargs(self, one, two, three=None, four=None, five=None):
3172 3172 '''used to test argument passing over the wire'''
3173 3173 return b"%s %s %s %s %s" % (
3174 3174 one,
3175 3175 two,
3176 3176 pycompat.bytestr(three),
3177 3177 pycompat.bytestr(four),
3178 3178 pycompat.bytestr(five),
3179 3179 )
3180 3180
3181 3181 def savecommitmessage(self, text):
3182 3182 fp = self.vfs(b'last-message.txt', b'wb')
3183 3183 try:
3184 3184 fp.write(text)
3185 3185 finally:
3186 3186 fp.close()
3187 3187 return self.pathto(fp.name[len(self.root) + 1 :])
3188 3188
3189 3189
3190 3190 # used to avoid circular references so destructors work
3191 3191 def aftertrans(files):
3192 3192 renamefiles = [tuple(t) for t in files]
3193 3193
3194 3194 def a():
3195 3195 for vfs, src, dest in renamefiles:
3196 3196 # if src and dest refer to the same file, vfs.rename is a no-op,
3197 3197 # leaving both src and dest on disk. delete dest to make sure
3198 3198 # the rename couldn't be such a no-op.
3199 3199 vfs.tryunlink(dest)
3200 3200 try:
3201 3201 vfs.rename(src, dest)
3202 3202 except OSError: # journal file does not yet exist
3203 3203 pass
3204 3204
3205 3205 return a
3206 3206
3207 3207
3208 3208 def undoname(fn):
3209 3209 base, name = os.path.split(fn)
3210 3210 assert name.startswith(b'journal')
3211 3211 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3212 3212
3213 3213
3214 3214 def instance(ui, path, create, intents=None, createopts=None):
3215 3215 localpath = util.urllocalpath(path)
3216 3216 if create:
3217 3217 createrepository(ui, localpath, createopts=createopts)
3218 3218
3219 3219 return makelocalrepository(ui, localpath, intents=intents)
3220 3220
3221 3221
3222 3222 def islocal(path):
3223 3223 return True
3224 3224
3225 3225
3226 3226 def defaultcreateopts(ui, createopts=None):
3227 3227 """Populate the default creation options for a repository.
3228 3228
3229 3229 A dictionary of explicitly requested creation options can be passed
3230 3230 in. Missing keys will be populated.
3231 3231 """
3232 3232 createopts = dict(createopts or {})
3233 3233
3234 3234 if b'backend' not in createopts:
3235 3235 # experimental config: storage.new-repo-backend
3236 3236 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3237 3237
3238 3238 return createopts
3239 3239
3240 3240
3241 3241 def newreporequirements(ui, createopts):
3242 3242 """Determine the set of requirements for a new local repository.
3243 3243
3244 3244 Extensions can wrap this function to specify custom requirements for
3245 3245 new repositories.
3246 3246 """
3247 3247 # If the repo is being created from a shared repository, we copy
3248 3248 # its requirements.
3249 3249 if b'sharedrepo' in createopts:
3250 3250 requirements = set(createopts[b'sharedrepo'].requirements)
3251 3251 if createopts.get(b'sharedrelative'):
3252 3252 requirements.add(b'relshared')
3253 3253 else:
3254 3254 requirements.add(b'shared')
3255 3255
3256 3256 return requirements
3257 3257
3258 3258 if b'backend' not in createopts:
3259 3259 raise error.ProgrammingError(
3260 3260 b'backend key not present in createopts; '
3261 3261 b'was defaultcreateopts() called?'
3262 3262 )
3263 3263
3264 3264 if createopts[b'backend'] != b'revlogv1':
3265 3265 raise error.Abort(
3266 3266 _(
3267 3267 b'unable to determine repository requirements for '
3268 3268 b'storage backend: %s'
3269 3269 )
3270 3270 % createopts[b'backend']
3271 3271 )
3272 3272
3273 3273 requirements = {b'revlogv1'}
3274 3274 if ui.configbool(b'format', b'usestore'):
3275 3275 requirements.add(b'store')
3276 3276 if ui.configbool(b'format', b'usefncache'):
3277 3277 requirements.add(b'fncache')
3278 3278 if ui.configbool(b'format', b'dotencode'):
3279 3279 requirements.add(b'dotencode')
3280 3280
3281 3281 compengines = ui.configlist(b'format', b'revlog-compression')
3282 3282 for compengine in compengines:
3283 3283 if compengine in util.compengines:
3284 3284 break
3285 3285 else:
3286 3286 raise error.Abort(
3287 3287 _(
3288 3288 b'compression engines %s defined by '
3289 3289 b'format.revlog-compression not available'
3290 3290 )
3291 3291 % b', '.join(b'"%s"' % e for e in compengines),
3292 3292 hint=_(
3293 3293 b'run "hg debuginstall" to list available '
3294 3294 b'compression engines'
3295 3295 ),
3296 3296 )
3297 3297
3298 3298 # zlib is the historical default and doesn't need an explicit requirement.
3299 3299 if compengine == b'zstd':
3300 3300 requirements.add(b'revlog-compression-zstd')
3301 3301 elif compengine != b'zlib':
3302 3302 requirements.add(b'exp-compression-%s' % compengine)
3303 3303
3304 3304 if scmutil.gdinitconfig(ui):
3305 3305 requirements.add(b'generaldelta')
3306 3306 if ui.configbool(b'format', b'sparse-revlog'):
3307 3307 requirements.add(SPARSEREVLOG_REQUIREMENT)
3308 3308
3309 3309 # experimental config: format.exp-use-side-data
3310 3310 if ui.configbool(b'format', b'exp-use-side-data'):
3311 3311 requirements.add(SIDEDATA_REQUIREMENT)
3312 3312 # experimental config: format.exp-use-copies-side-data-changeset
3313 3313 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3314 3314 requirements.add(SIDEDATA_REQUIREMENT)
3315 3315 requirements.add(COPIESSDC_REQUIREMENT)
3316 3316 if ui.configbool(b'experimental', b'treemanifest'):
3317 3317 requirements.add(b'treemanifest')
3318 3318
3319 3319 revlogv2 = ui.config(b'experimental', b'revlogv2')
3320 3320 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3321 3321 requirements.remove(b'revlogv1')
3322 3322 # generaldelta is implied by revlogv2.
3323 3323 requirements.discard(b'generaldelta')
3324 3324 requirements.add(REVLOGV2_REQUIREMENT)
3325 3325 # experimental config: format.internal-phase
3326 3326 if ui.configbool(b'format', b'internal-phase'):
3327 3327 requirements.add(b'internal-phase')
3328 3328
3329 3329 if createopts.get(b'narrowfiles'):
3330 3330 requirements.add(repository.NARROW_REQUIREMENT)
3331 3331
3332 3332 if createopts.get(b'lfs'):
3333 3333 requirements.add(b'lfs')
3334 3334
3335 3335 if ui.configbool(b'format', b'bookmarks-in-store'):
3336 3336 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3337 3337
3338 3338 if ui.configbool(b'format', b'use-persistent-nodemap'):
3339 3339 requirements.add(NODEMAP_REQUIREMENT)
3340 3340
3341 3341 return requirements
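# A hedged illustration (assuming a configured ``ui`` instance): for a
# default setup, the computed set contains b'revlogv1' plus the store,
# fncache, dotencode and generaldelta entries derived from the config
# checks above.
#
#     reqs = newreporequirements(ui, defaultcreateopts(ui))
#     assert b'revlogv1' in reqs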
3342 3342
3343 3343
3344 3344 def checkrequirementscompat(ui, requirements):
3345 3345 """ Checks compatibility of repository requirements enabled and disabled.
3346 3346
3347 3347 Returns a set of requirements which need to be dropped because the
3348 3348 requirements they depend on are not enabled. Also warns users about it."""
3349 3349
3350 3350 dropped = set()
3351 3351
3352 3352 if b'store' not in requirements:
3353 3353 if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3354 3354 ui.warn(
3355 3355 _(
3356 3356 b'ignoring enabled \'format.bookmarks-in-store\' config '
3357 3357 b'because it is incompatible with disabled '
3358 3358 b'\'format.usestore\' config\n'
3359 3359 )
3360 3360 )
3361 3361 dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3362 3362
3363 3363 if b'shared' in requirements or b'relshared' in requirements:
3364 3364 raise error.Abort(
3365 3365 _(
3366 3366 b"cannot create shared repository as source was created"
3367 3367 b" with 'format.usestore' config disabled"
3368 3368 )
3369 3369 )
3370 3370
3371 3371 return dropped
3372 3372
3373 3373
3374 3374 def filterknowncreateopts(ui, createopts):
3375 3375 """Filters a dict of repo creation options against options that are known.
3376 3376
3377 3377 Receives a dict of repo creation options and returns a dict of those
3378 3378 options that we don't know how to handle.
3379 3379
3380 3380 This function is called as part of repository creation. If the
3381 3381 returned dict contains any items, repository creation will not
3382 3382 be allowed, as it means there was a request to create a repository
3383 3383 with options not recognized by loaded code.
3384 3384
3385 3385 Extensions can wrap this function to filter out creation options
3386 3386 they know how to handle.
3387 3387 """
3388 3388 known = {
3389 3389 b'backend',
3390 3390 b'lfs',
3391 3391 b'narrowfiles',
3392 3392 b'sharedrepo',
3393 3393 b'sharedrelative',
3394 3394 b'shareditems',
3395 3395 b'shallowfilestore',
3396 3396 }
3397 3397
3398 3398 return {k: v for k, v in createopts.items() if k not in known}
3399 3399
3400 3400
3401 3401 def createrepository(ui, path, createopts=None):
3402 3402 """Create a new repository in a vfs.
3403 3403
3404 3404 ``path`` path to the new repo's working directory.
3405 3405 ``createopts`` options for the new repository.
3406 3406
3407 3407 The following keys for ``createopts`` are recognized:
3408 3408
3409 3409 backend
3410 3410 The storage backend to use.
3411 3411 lfs
3412 3412 Repository will be created with ``lfs`` requirement. The lfs extension
3413 3413 will automatically be loaded when the repository is accessed.
3414 3414 narrowfiles
3415 3415 Set up repository to support narrow file storage.
3416 3416 sharedrepo
3417 3417 Repository object from which storage should be shared.
3418 3418 sharedrelative
3419 3419 Boolean indicating if the path to the shared repo should be
3420 3420 stored as relative. By default, the pointer to the "parent" repo
3421 3421 is stored as an absolute path.
3422 3422 shareditems
3423 3423 Set of items to share to the new repository (in addition to storage).
3424 3424 shallowfilestore
3425 3425 Indicates that storage for files should be shallow (not all ancestor
3426 3426 revisions are known).
3427 3427 """
3428 3428 createopts = defaultcreateopts(ui, createopts=createopts)
3429 3429
3430 3430 unknownopts = filterknowncreateopts(ui, createopts)
3431 3431
3432 3432 if not isinstance(unknownopts, dict):
3433 3433 raise error.ProgrammingError(
3434 3434 b'filterknowncreateopts() did not return a dict'
3435 3435 )
3436 3436
3437 3437 if unknownopts:
3438 3438 raise error.Abort(
3439 3439 _(
3440 3440 b'unable to create repository because of unknown '
3441 3441 b'creation option: %s'
3442 3442 )
3443 3443 % b', '.join(sorted(unknownopts)),
3444 3444 hint=_(b'is a required extension not loaded?'),
3445 3445 )
3446 3446
3447 3447 requirements = newreporequirements(ui, createopts=createopts)
3448 3448 requirements -= checkrequirementscompat(ui, requirements)
3449 3449
3450 3450 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3451 3451
3452 3452 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3453 3453 if hgvfs.exists():
3454 3454 raise error.RepoError(_(b'repository %s already exists') % path)
3455 3455
3456 3456 if b'sharedrepo' in createopts:
3457 3457 sharedpath = createopts[b'sharedrepo'].sharedpath
3458 3458
3459 3459 if createopts.get(b'sharedrelative'):
3460 3460 try:
3461 3461 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3462 3462 except (IOError, ValueError) as e:
3463 3463 # ValueError is raised on Windows if the drive letters differ
3464 3464 # on each path.
3465 3465 raise error.Abort(
3466 3466 _(b'cannot calculate relative path'),
3467 3467 hint=stringutil.forcebytestr(e),
3468 3468 )
3469 3469
3470 3470 if not wdirvfs.exists():
3471 3471 wdirvfs.makedirs()
3472 3472
3473 3473 hgvfs.makedir(notindexed=True)
3474 3474 if b'sharedrepo' not in createopts:
3475 3475 hgvfs.mkdir(b'cache')
3476 3476 hgvfs.mkdir(b'wcache')
3477 3477
3478 3478 if b'store' in requirements and b'sharedrepo' not in createopts:
3479 3479 hgvfs.mkdir(b'store')
3480 3480
3481 3481 # We create an invalid changelog outside the store so very old
3482 3482 # Mercurial versions (which didn't know about the requirements
3483 3483 # file) encounter an error on reading the changelog. This
3484 3484 # effectively locks out old clients and prevents them from
3485 3485 # mucking with a repo in an unknown format.
3486 3486 #
3487 3487 # The revlog header has version 2, which won't be recognized by
3488 3488 # such old clients.
3489 3489 hgvfs.append(
3490 3490 b'00changelog.i',
3491 3491 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3492 3492 b'layout',
3493 3493 )
3494 3494
3495 3495 scmutil.writerequires(hgvfs, requirements)
3496 3496
3497 3497 # Write out file telling readers where to find the shared store.
3498 3498 if b'sharedrepo' in createopts:
3499 3499 hgvfs.write(b'sharedpath', sharedpath)
3500 3500
3501 3501 if createopts.get(b'shareditems'):
3502 3502 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3503 3503 hgvfs.write(b'shared', shared)
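# A hedged usage sketch; the path below is an assumption. The ``lfs`` key
# is one of the createopts documented above and results in the b'lfs'
# requirement being written.
#
#     createrepository(ui, b'/tmp/newrepo', createopts={b'lfs': True})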
3504 3504
3505 3505
3506 3506 def poisonrepository(repo):
3507 3507 """Poison a repository instance so it can no longer be used."""
3508 3508 # Perform any cleanup on the instance.
3509 3509 repo.close()
3510 3510
3511 3511 # Our strategy is to replace the type of the object with one that
3512 3512 # has all attribute lookups result in error.
3513 3513 #
3514 3514 # But we have to allow the close() method because some constructors
3515 3515 # of repos call close() on repo references.
3516 3516 class poisonedrepository(object):
3517 3517 def __getattribute__(self, item):
3518 3518 if item == 'close':
3519 3519 return object.__getattribute__(self, item)
3520 3520
3521 3521 raise error.ProgrammingError(
3522 3522 b'repo instances should not be used after unshare'
3523 3523 )
3524 3524
3525 3525 def close(self):
3526 3526 pass
3527 3527
3528 3528 # We may have a repoview, which intercepts __setattr__. So be sure
3529 3529 # we operate at the lowest level possible.
3530 3530 object.__setattr__(repo, '__class__', poisonedrepository)
@@ -1,831 +1,833
1 1 # sparse.py - functionality for sparse checkouts
2 2 #
3 3 # Copyright 2014 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import os
11 11
12 12 from .i18n import _
13 13 from .node import (
14 14 hex,
15 15 nullid,
16 16 )
17 17 from . import (
18 18 error,
19 19 match as matchmod,
20 20 merge as mergemod,
21 21 mergestate as mergestatemod,
22 22 pathutil,
23 23 pycompat,
24 24 scmutil,
25 25 util,
26 26 )
27 from .interfaces import repository
27 28 from .utils import hashutil
28 29
30
29 31 # Whether sparse features are enabled. This variable is intended to be
30 32 # temporary to facilitate porting sparse to core. It should eventually be
31 33 # a per-repo option, possibly a repo requirement.
32 34 enabled = False
33 35
34 36
35 37 def parseconfig(ui, raw, action):
36 38 """Parse sparse config file content.
37 39
38 40 action is the command which is triggering this read; it can be narrow or sparse
39 41
40 42 Returns a tuple of includes, excludes, and profiles.
41 43 """
42 44 includes = set()
43 45 excludes = set()
44 46 profiles = set()
45 47 current = None
46 48 havesection = False
47 49
48 50 for line in raw.split(b'\n'):
49 51 line = line.strip()
50 52 if not line or line.startswith(b'#'):
51 53 # empty or comment line, skip
52 54 continue
53 55 elif line.startswith(b'%include '):
54 56 line = line[9:].strip()
55 57 if line:
56 58 profiles.add(line)
57 59 elif line == b'[include]':
58 60 if havesection and current != includes:
59 61 # TODO pass filename into this API so we can report it.
60 62 raise error.Abort(
61 63 _(
62 64 b'%(action)s config cannot have includes '
63 65 b'after excludes'
64 66 )
65 67 % {b'action': action}
66 68 )
67 69 havesection = True
68 70 current = includes
69 71 continue
70 72 elif line == b'[exclude]':
71 73 havesection = True
72 74 current = excludes
73 75 elif line:
74 76 if current is None:
75 77 raise error.Abort(
76 78 _(
77 79 b'%(action)s config entry outside of '
78 80 b'section: %(line)s'
79 81 )
80 82 % {b'action': action, b'line': line},
81 83 hint=_(
82 84 b'add an [include] or [exclude] line '
83 85 b'to declare the entry type'
84 86 ),
85 87 )
86 88
87 89 if line.strip().startswith(b'/'):
88 90 ui.warn(
89 91 _(
90 92 b'warning: %(action)s profile cannot use'
91 93 b' paths starting with /, ignoring %(line)s\n'
92 94 )
93 95 % {b'action': action, b'line': line}
94 96 )
95 97 continue
96 98 current.add(line)
97 99
98 100 return includes, excludes, profiles
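# An illustrative config accepted by the parser above (the profile name
# and patterns are assumptions, not from this change):
#
#     raw = (
#         b'%include base.sparse\n'
#         b'[include]\n'
#         b'src/\n'
#         b'[exclude]\n'
#         b'src/tests/\n'
#     )
#     includes, excludes, profiles = parseconfig(ui, raw, b'sparse')
#     # -> ({b'src/'}, {b'src/tests/'}, {b'base.sparse'})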
99 101
100 102
101 103 # Exists as separate function to facilitate monkeypatching.
102 104 def readprofile(repo, profile, changeid):
103 105 """Resolve the raw content of a sparse profile file."""
104 106 # TODO add some kind of cache here because this incurs a manifest
105 107 # resolve and can be slow.
106 108 return repo.filectx(profile, changeid=changeid).data()
107 109
108 110
109 111 def patternsforrev(repo, rev):
110 112 """Obtain sparse checkout patterns for the given rev.
111 113
112 114 Returns a tuple of iterables representing includes, excludes, and
113 115 profiles.
114 116 """
115 117 # Feature isn't enabled. No-op.
116 118 if not enabled:
117 119 return set(), set(), set()
118 120
119 121 raw = repo.vfs.tryread(b'sparse')
120 122 if not raw:
121 123 return set(), set(), set()
122 124
123 125 if rev is None:
124 126 raise error.Abort(
125 127 _(b'cannot parse sparse patterns from working directory')
126 128 )
127 129
128 130 includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse')
129 131 ctx = repo[rev]
130 132
131 133 if profiles:
132 134 visited = set()
133 135 while profiles:
134 136 profile = profiles.pop()
135 137 if profile in visited:
136 138 continue
137 139
138 140 visited.add(profile)
139 141
140 142 try:
141 143 raw = readprofile(repo, profile, rev)
142 144 except error.ManifestLookupError:
143 145 msg = (
144 146 b"warning: sparse profile '%s' not found "
145 147 b"in rev %s - ignoring it\n" % (profile, ctx)
146 148 )
147 149 # experimental config: sparse.missingwarning
148 150 if repo.ui.configbool(b'sparse', b'missingwarning'):
149 151 repo.ui.warn(msg)
150 152 else:
151 153 repo.ui.debug(msg)
152 154 continue
153 155
154 156 pincludes, pexcludes, subprofs = parseconfig(
155 157 repo.ui, raw, b'sparse'
156 158 )
157 159 includes.update(pincludes)
158 160 excludes.update(pexcludes)
159 161 profiles.update(subprofs)
160 162
161 163 profiles = visited
162 164
163 165 if includes:
164 166 includes.add(b'.hg*')
165 167
166 168 return includes, excludes, profiles
167 169
168 170
169 171 def activeconfig(repo):
170 172 """Determine the active sparse config rules.
171 173
172 174 Rules are constructed by reading the current sparse config and bringing in
173 175 referenced profiles from parents of the working directory.
174 176 """
175 177 revs = [
176 178 repo.changelog.rev(node)
177 179 for node in repo.dirstate.parents()
178 180 if node != nullid
179 181 ]
180 182
181 183 allincludes = set()
182 184 allexcludes = set()
183 185 allprofiles = set()
184 186
185 187 for rev in revs:
186 188 includes, excludes, profiles = patternsforrev(repo, rev)
187 189 allincludes |= includes
188 190 allexcludes |= excludes
189 191 allprofiles |= profiles
190 192
191 193 return allincludes, allexcludes, allprofiles
192 194
193 195
194 196 def configsignature(repo, includetemp=True):
195 197 """Obtain the signature string for the current sparse configuration.
196 198
197 199 This is used to construct a cache key for matchers.
198 200 """
199 201 cache = repo._sparsesignaturecache
200 202
201 203 signature = cache.get(b'signature')
202 204
203 205 if includetemp:
204 206 tempsignature = cache.get(b'tempsignature')
205 207 else:
206 208 tempsignature = b'0'
207 209
208 210 if signature is None or (includetemp and tempsignature is None):
209 211 signature = hex(hashutil.sha1(repo.vfs.tryread(b'sparse')).digest())
210 212 cache[b'signature'] = signature
211 213
212 214 if includetemp:
213 215 raw = repo.vfs.tryread(b'tempsparse')
214 216 tempsignature = hex(hashutil.sha1(raw).digest())
215 217 cache[b'tempsignature'] = tempsignature
216 218
217 219 return b'%s %s' % (signature, tempsignature)
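# Sketch of what the cache key looks like, assuming a repo where neither
# 'sparse' nor 'tempsparse' exists: vfs.tryread() returns b'' for both,
# so the signature is the sha1 of the empty string, twice.
#
#     empty = hex(hashutil.sha1(b'').digest())
#     # configsignature(repo) == b'%s %s' % (empty, empty)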
218 220
219 221
220 222 def writeconfig(repo, includes, excludes, profiles):
221 223 """Write the sparse config file given a sparse configuration."""
222 224 with repo.vfs(b'sparse', b'wb') as fh:
223 225 for p in sorted(profiles):
224 226 fh.write(b'%%include %s\n' % p)
225 227
226 228 if includes:
227 229 fh.write(b'[include]\n')
228 230 for i in sorted(includes):
229 231 fh.write(i)
230 232 fh.write(b'\n')
231 233
232 234 if excludes:
233 235 fh.write(b'[exclude]\n')
234 236 for e in sorted(excludes):
235 237 fh.write(e)
236 238 fh.write(b'\n')
237 239
238 240 repo._sparsesignaturecache.clear()
239 241
240 242
241 243 def readtemporaryincludes(repo):
242 244 raw = repo.vfs.tryread(b'tempsparse')
243 245 if not raw:
244 246 return set()
245 247
246 248 return set(raw.split(b'\n'))
247 249
248 250
249 251 def writetemporaryincludes(repo, includes):
250 252 repo.vfs.write(b'tempsparse', b'\n'.join(sorted(includes)))
251 253 repo._sparsesignaturecache.clear()
252 254
253 255
254 256 def addtemporaryincludes(repo, additional):
255 257 includes = readtemporaryincludes(repo)
256 258 for i in additional:
257 259 includes.add(i)
258 260 writetemporaryincludes(repo, includes)
259 261
260 262
261 263 def prunetemporaryincludes(repo):
262 264 if not enabled or not repo.vfs.exists(b'tempsparse'):
263 265 return
264 266
265 267 s = repo.status()
266 268 if s.modified or s.added or s.removed or s.deleted:
267 269 # Still have pending changes. Don't bother trying to prune.
268 270 return
269 271
270 272 sparsematch = matcher(repo, includetemp=False)
271 273 dirstate = repo.dirstate
272 274 mresult = mergemod.mergeresult()
273 275 dropped = []
274 276 tempincludes = readtemporaryincludes(repo)
275 277 for file in tempincludes:
276 278 if file in dirstate and not sparsematch(file):
277 279 message = _(b'dropping temporarily included sparse files')
278 280 mresult.addfile(file, mergestatemod.ACTION_REMOVE, None, message)
279 281 dropped.append(file)
280 282
281 283 mergemod.applyupdates(
282 284 repo, mresult, repo[None], repo[b'.'], False, wantfiledata=False
283 285 )
284 286
285 287 # Fix dirstate
286 288 for file in dropped:
287 289 dirstate.drop(file)
288 290
289 291 repo.vfs.unlink(b'tempsparse')
290 292 repo._sparsesignaturecache.clear()
291 293 msg = _(
292 294 b'cleaned up %d temporarily added file(s) from the '
293 295 b'sparse checkout\n'
294 296 )
295 297 repo.ui.status(msg % len(tempincludes))
296 298
297 299
298 300 def forceincludematcher(matcher, includes):
299 301 """Returns a matcher that returns true for any of the forced includes
300 302 before testing against the actual matcher."""
301 303 kindpats = [(b'path', include, b'') for include in includes]
302 304 includematcher = matchmod.includematcher(b'', kindpats)
303 305 return matchmod.unionmatcher([includematcher, matcher])
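# Minimal sketch, assuming an existing matcher ``base``: paths under a
# forced include always match, regardless of what ``base`` says.
#
#     m = forceincludematcher(base, {b'.hgsparse'})
#     # m(b'.hgsparse') is True even when base(b'.hgsparse') is False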
304 306
305 307
306 308 def matcher(repo, revs=None, includetemp=True):
307 309 """Obtain a matcher for sparse working directories for the given revs.
308 310
309 311 If multiple revisions are specified, the matcher is the union of the
310 312 matchers for all revs.
311 313
312 314 ``includetemp`` indicates whether to use the temporary sparse profile.
313 315 """
314 316 # If sparse isn't enabled, the sparse matcher matches everything.
315 317 if not enabled:
316 318 return matchmod.always()
317 319
318 320 if not revs or revs == [None]:
319 321 revs = [
320 322 repo.changelog.rev(node)
321 323 for node in repo.dirstate.parents()
322 324 if node != nullid
323 325 ]
324 326
325 327 signature = configsignature(repo, includetemp=includetemp)
326 328
327 329 key = b'%s %s' % (signature, b' '.join(map(pycompat.bytestr, revs)))
328 330
329 331 result = repo._sparsematchercache.get(key)
330 332 if result:
331 333 return result
332 334
333 335 matchers = []
334 336 for rev in revs:
335 337 try:
336 338 includes, excludes, profiles = patternsforrev(repo, rev)
337 339
338 340 if includes or excludes:
339 341 matcher = matchmod.match(
340 342 repo.root,
341 343 b'',
342 344 [],
343 345 include=includes,
344 346 exclude=excludes,
345 347 default=b'relpath',
346 348 )
347 349 matchers.append(matcher)
348 350 except IOError:
349 351 pass
350 352
351 353 if not matchers:
352 354 result = matchmod.always()
353 355 elif len(matchers) == 1:
354 356 result = matchers[0]
355 357 else:
356 358 result = matchmod.unionmatcher(matchers)
357 359
358 360 if includetemp:
359 361 tempincludes = readtemporaryincludes(repo)
360 362 result = forceincludematcher(result, tempincludes)
361 363
362 364 repo._sparsematchercache[key] = result
363 365
364 366 return result
365 367
366 368
367 369 def filterupdatesactions(repo, wctx, mctx, branchmerge, mresult):
368 370 """Filter updates to only lay out files that match the sparse rules."""
369 371 if not enabled:
370 372 return
371 373
372 374 oldrevs = [pctx.rev() for pctx in wctx.parents()]
373 375 oldsparsematch = matcher(repo, oldrevs)
374 376
375 377 if oldsparsematch.always():
376 378 return
377 379
378 380 files = set()
379 381 prunedactions = {}
380 382
381 383 if branchmerge:
382 384 # If we're merging, use the wctx filter, since we're merging into
383 385 # the wctx.
384 386 sparsematch = matcher(repo, [wctx.p1().rev()])
385 387 else:
386 388 # If we're updating, use the target context's filter, since we're
387 389 # moving to the target context.
388 390 sparsematch = matcher(repo, [mctx.rev()])
389 391
390 392 temporaryfiles = []
391 393 for file, action in mresult.filemap():
392 394 type, args, msg = action
393 395 files.add(file)
394 396 if sparsematch(file):
395 397 prunedactions[file] = action
396 398 elif type == mergestatemod.ACTION_MERGE:
397 399 temporaryfiles.append(file)
398 400 prunedactions[file] = action
399 401 elif branchmerge:
400 402 if type != mergestatemod.ACTION_KEEP:
401 403 temporaryfiles.append(file)
402 404 prunedactions[file] = action
403 405 elif type == mergestatemod.ACTION_FORGET:
404 406 prunedactions[file] = action
405 407 elif file in wctx:
406 408 prunedactions[file] = (mergestatemod.ACTION_REMOVE, args, msg)
407 409
408 410 # In case of a rename on one side, it is possible that f1 is not
409 411 # present in the sparse checkout; we should include it.
410 412 # TODO: should we do the same for f2?
411 413 # This exists as a separate check because the file can be in sparse,
412 414 # so if we folded this condition into `elif type == ACTION_MERGE`
413 415 # above, it would not be triggered.
414 416 if branchmerge and type == mergestatemod.ACTION_MERGE:
415 417 f1, f2, fa, move, anc = args
416 418 if not sparsematch(f1):
417 419 temporaryfiles.append(f1)
418 420
419 421 if len(temporaryfiles) > 0:
420 422 repo.ui.status(
421 423 _(
422 424 b'temporarily included %d file(s) in the sparse '
423 425 b'checkout for merging\n'
424 426 )
425 427 % len(temporaryfiles)
426 428 )
427 429 addtemporaryincludes(repo, temporaryfiles)
428 430
429 431 # Add the new files to the working copy so they can be merged, etc
430 432 tmresult = mergemod.mergeresult()
431 433 message = b'temporarily adding to sparse checkout'
432 434 wctxmanifest = repo[None].manifest()
433 435 for file in temporaryfiles:
434 436 if file in wctxmanifest:
435 437 fctx = repo[None][file]
436 438 tmresult.addfile(
437 439 file,
438 440 mergestatemod.ACTION_GET,
439 441 (fctx.flags(), False),
440 442 message,
441 443 )
442 444
443 445 mergemod.applyupdates(
444 446 repo, tmresult, repo[None], repo[b'.'], False, wantfiledata=False
445 447 )
446 448
447 449 dirstate = repo.dirstate
448 450 for file, flags, msg in tmresult.getactions([mergestatemod.ACTION_GET]):
449 451 dirstate.normal(file)
450 452
451 453 profiles = activeconfig(repo)[2]
452 454 changedprofiles = profiles & files
453 455 # If an active profile changed during the update, refresh the checkout.
454 456 # Don't do this during a branch merge, since all incoming changes should
455 457 # have been handled by the temporary includes above.
456 458 if changedprofiles and not branchmerge:
457 459 mf = mctx.manifest()
458 460 for file in mf:
459 461 old = oldsparsematch(file)
460 462 new = sparsematch(file)
461 463 if not old and new:
462 464 flags = mf.flags(file)
463 465 prunedactions[file] = (
464 466 mergestatemod.ACTION_GET,
465 467 (flags, False),
466 468 b'',
467 469 )
468 470 elif old and not new:
469 471 prunedactions[file] = (mergestatemod.ACTION_REMOVE, [], b'')
470 472
471 473 mresult.setactions(prunedactions)
472 474
473 475
474 476 def refreshwdir(repo, origstatus, origsparsematch, force=False):
475 477 """Refreshes working directory by taking sparse config into account.
476 478
477 479 The old status and sparse matcher are compared against the current sparse
478 480 matcher.
479 481
480 482 Will abort if a file with pending changes is being excluded or included
481 483 unless ``force`` is True.
482 484 """
483 485 # Verify there are no pending changes
484 486 pending = set()
485 487 pending.update(origstatus.modified)
486 488 pending.update(origstatus.added)
487 489 pending.update(origstatus.removed)
488 490 sparsematch = matcher(repo)
489 491 abort = False
490 492
491 493 for f in pending:
492 494 if not sparsematch(f):
493 495 repo.ui.warn(_(b"pending changes to '%s'\n") % f)
494 496 abort = not force
495 497
496 498 if abort:
497 499 raise error.Abort(
498 500 _(b'could not update sparseness due to pending changes')
499 501 )
500 502
501 503 # Calculate merge result
502 504 dirstate = repo.dirstate
503 505 ctx = repo[b'.']
504 506 added = []
505 507 lookup = []
506 508 dropped = []
507 509 mf = ctx.manifest()
508 510 files = set(mf)
509 511 mresult = mergemod.mergeresult()
510 512
511 513 for file in files:
512 514 old = origsparsematch(file)
513 515 new = sparsematch(file)
514 516 # Add files that are newly included, or that don't exist in
515 517 # the dirstate yet.
516 518 if (new and not old) or (old and new and not file in dirstate):
517 519 fl = mf.flags(file)
518 520 if repo.wvfs.exists(file):
519 521 mresult.addfile(file, mergestatemod.ACTION_EXEC, (fl,), b'')
520 522 lookup.append(file)
521 523 else:
522 524 mresult.addfile(
523 525 file, mergestatemod.ACTION_GET, (fl, False), b''
524 526 )
525 527 added.append(file)
526 528 # Drop files that are newly excluded, or that still exist in
527 529 # the dirstate.
528 530 elif (old and not new) or (not old and not new and file in dirstate):
529 531 dropped.append(file)
530 532 if file not in pending:
531 533 mresult.addfile(file, mergestatemod.ACTION_REMOVE, [], b'')
532 534
533 535 # Verify there are no pending changes in newly included files
534 536 abort = False
535 537 for file in lookup:
536 538 repo.ui.warn(_(b"pending changes to '%s'\n") % file)
537 539 abort = not force
538 540 if abort:
539 541 raise error.Abort(
540 542 _(
541 543 b'cannot change sparseness due to pending '
542 544 b'changes (delete the files or use '
543 545 b'--force to bring them back dirty)'
544 546 )
545 547 )
546 548
547 549 # Check for files that were only in the dirstate.
548 550 for file, state in pycompat.iteritems(dirstate):
549 551 if not file in files:
550 552 old = origsparsematch(file)
551 553 new = sparsematch(file)
552 554 if old and not new:
553 555 dropped.append(file)
554 556
555 557 mergemod.applyupdates(
556 558 repo, mresult, repo[None], repo[b'.'], False, wantfiledata=False
557 559 )
558 560
559 561 # Fix dirstate
560 562 for file in added:
561 563 dirstate.normal(file)
562 564
563 565 for file in dropped:
564 566 dirstate.drop(file)
565 567
566 568 for file in lookup:
567 569 # File exists on disk, and we're bringing it back in an unknown state.
568 570 dirstate.normallookup(file)
569 571
570 572 return added, dropped, lookup
571 573
572 574
573 575 def aftercommit(repo, node):
574 576 """Perform actions after a working directory commit."""
575 577 # This function is called unconditionally, even if sparse isn't
576 578 # enabled.
577 579 ctx = repo[node]
578 580
579 581 profiles = patternsforrev(repo, ctx.rev())[2]
580 582
581 583 # profiles will only have data if sparse is enabled.
582 584 if profiles & set(ctx.files()):
583 585 origstatus = repo.status()
584 586 origsparsematch = matcher(repo)
585 587 refreshwdir(repo, origstatus, origsparsematch, force=True)
586 588
587 589 prunetemporaryincludes(repo)
588 590
589 591
590 592 def _updateconfigandrefreshwdir(
591 593 repo, includes, excludes, profiles, force=False, removing=False
592 594 ):
593 595 """Update the sparse config and working directory state."""
594 596 raw = repo.vfs.tryread(b'sparse')
595 597 oldincludes, oldexcludes, oldprofiles = parseconfig(repo.ui, raw, b'sparse')
596 598
597 599 oldstatus = repo.status()
598 600 oldmatch = matcher(repo)
599 601 oldrequires = set(repo.requirements)
600 602
601 603 # TODO remove this try..except once the matcher integrates better
602 604 # with dirstate. We currently have to write the updated config
603 605 # because that will invalidate the matcher cache and force a
604 606 # re-read. We ideally want to update the cached matcher on the
605 607 # repo instance then flush the new config to disk once wdir is
606 608 # updated. But this requires massive rework to matcher() and its
607 609 # consumers.
608 610
609 if b'exp-sparse' in oldrequires and removing:
610 repo.requirements.discard(b'exp-sparse')
611 if repository.SPARSE_REQUIREMENT in oldrequires and removing:
612 repo.requirements.discard(repository.SPARSE_REQUIREMENT)
611 613 scmutil.writereporequirements(repo)
612 elif b'exp-sparse' not in oldrequires:
613 repo.requirements.add(b'exp-sparse')
614 elif repository.SPARSE_REQUIREMENT not in oldrequires:
615 repo.requirements.add(repository.SPARSE_REQUIREMENT)
614 616 scmutil.writereporequirements(repo)
615 617
616 618 try:
617 619 writeconfig(repo, includes, excludes, profiles)
618 620 return refreshwdir(repo, oldstatus, oldmatch, force=force)
619 621 except Exception:
620 622 if repo.requirements != oldrequires:
621 623 repo.requirements.clear()
622 624 repo.requirements |= oldrequires
623 625 scmutil.writereporequirements(repo)
624 626 writeconfig(repo, oldincludes, oldexcludes, oldprofiles)
625 627 raise
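# Hedged sketch of the requirement bookkeeping above (the include pattern
# is an assumption): writing sparse rules to a repo without the
# requirement adds SPARSE_REQUIREMENT, while a reset-style call with
# removing=True that leaves no rules discards it again.
#
#     _updateconfigandrefreshwdir(repo, {b'src/'}, set(), set())
#     assert repository.SPARSE_REQUIREMENT in repo.requirements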
626 628
627 629
628 630 def clearrules(repo, force=False):
629 631 """Clears include/exclude rules from the sparse config.
630 632
631 633 The remaining sparse config only has profiles, if defined. The working
632 634 directory is refreshed, as needed.
633 635 """
634 636 with repo.wlock():
635 637 raw = repo.vfs.tryread(b'sparse')
636 638 includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse')
637 639
638 640 if not includes and not excludes:
639 641 return
640 642
641 643 _updateconfigandrefreshwdir(repo, set(), set(), profiles, force=force)
642 644
643 645
644 646 def importfromfiles(repo, opts, paths, force=False):
645 647 """Import sparse config rules from files.
646 648
647 649 The updated sparse config is written out and the working directory
648 650 is refreshed, as needed.
649 651 """
650 652 with repo.wlock():
651 653 # read current configuration
652 654 raw = repo.vfs.tryread(b'sparse')
653 655 includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse')
654 656 aincludes, aexcludes, aprofiles = activeconfig(repo)
655 657
656 658 # Import rules on top; only take in rules that are not yet
657 659 # part of the active rules.
658 660 changed = False
659 661 for p in paths:
660 662 with util.posixfile(util.expandpath(p), mode=b'rb') as fh:
661 663 raw = fh.read()
662 664
663 665 iincludes, iexcludes, iprofiles = parseconfig(
664 666 repo.ui, raw, b'sparse'
665 667 )
666 668 oldsize = len(includes) + len(excludes) + len(profiles)
667 669 includes.update(iincludes - aincludes)
668 670 excludes.update(iexcludes - aexcludes)
669 671 profiles.update(iprofiles - aprofiles)
670 672 if len(includes) + len(excludes) + len(profiles) > oldsize:
671 673 changed = True
672 674
673 675 profilecount = includecount = excludecount = 0
674 676 fcounts = (0, 0, 0)
675 677
676 678 if changed:
677 679 profilecount = len(profiles - aprofiles)
678 680 includecount = len(includes - aincludes)
679 681 excludecount = len(excludes - aexcludes)
680 682
681 683 fcounts = map(
682 684 len,
683 685 _updateconfigandrefreshwdir(
684 686 repo, includes, excludes, profiles, force=force
685 687 ),
686 688 )
687 689
688 690 printchanges(
689 691 repo.ui, opts, profilecount, includecount, excludecount, *fcounts
690 692 )
691 693
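The import loop above is additive and idempotent: each file's rules are merged in, anything already present in the active configuration is filtered out first, and a change is detected purely by growth in the combined rule count. A short sketch of that merge rule, using hypothetical data:

    # Hypothetical data illustrating the merge in importfromfiles.
    active_includes = {b'path:src'}            # rules already in effect
    includes = {b'path:src'}                   # current on-disk config
    imported = {b'path:src', b'path:docs'}     # rules read from one file

    oldsize = len(includes)
    includes.update(imported - active_includes)   # skip active rules
    changed = len(includes) > oldsize
    assert changed and includes == {b'path:src', b'path:docs'}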
692 694
693 695 def updateconfig(
694 696 repo,
695 697 pats,
696 698 opts,
697 699 include=False,
698 700 exclude=False,
699 701 reset=False,
700 702 delete=False,
701 703 enableprofile=False,
702 704 disableprofile=False,
703 705 force=False,
704 706 usereporootpaths=False,
705 707 ):
706 708 """Perform a sparse config update.
707 709
708 710 Only one of the actions may be performed.
709 711
710 712 The new config is written out and a working directory refresh is performed.
711 713 """
712 714 with repo.wlock():
713 715 raw = repo.vfs.tryread(b'sparse')
714 716 oldinclude, oldexclude, oldprofiles = parseconfig(
715 717 repo.ui, raw, b'sparse'
716 718 )
717 719
718 720 if reset:
719 721 newinclude = set()
720 722 newexclude = set()
721 723 newprofiles = set()
722 724 else:
723 725 newinclude = set(oldinclude)
724 726 newexclude = set(oldexclude)
725 727 newprofiles = set(oldprofiles)
726 728
727 729 if any(os.path.isabs(pat) for pat in pats):
728 730 raise error.Abort(_(b'paths cannot be absolute'))
729 731
730 732 if not usereporootpaths:
731 733 # let's treat paths as relative to cwd
732 734 root, cwd = repo.root, repo.getcwd()
733 735 abspats = []
734 736 for kindpat in pats:
735 737 kind, pat = matchmod._patsplit(kindpat, None)
736 738 if kind in matchmod.cwdrelativepatternkinds or kind is None:
737 739 ap = (kind + b':' if kind else b'') + pathutil.canonpath(
738 740 root, cwd, pat
739 741 )
740 742 abspats.append(ap)
741 743 else:
742 744 abspats.append(kindpat)
743 745 pats = abspats
744 746
745 747 if include:
746 748 newinclude.update(pats)
747 749 elif exclude:
748 750 newexclude.update(pats)
749 751 elif enableprofile:
750 752 newprofiles.update(pats)
751 753 elif disableprofile:
752 754 newprofiles.difference_update(pats)
753 755 elif delete:
754 756 newinclude.difference_update(pats)
755 757 newexclude.difference_update(pats)
756 758
757 759 profilecount = len(newprofiles - oldprofiles) - len(
758 760 oldprofiles - newprofiles
759 761 )
760 762 includecount = len(newinclude - oldinclude) - len(
761 763 oldinclude - newinclude
762 764 )
763 765 excludecount = len(newexclude - oldexclude) - len(
764 766 oldexclude - newexclude
765 767 )
766 768
767 769 fcounts = map(
768 770 len,
769 771 _updateconfigandrefreshwdir(
770 772 repo,
771 773 newinclude,
772 774 newexclude,
773 775 newprofiles,
774 776 force=force,
775 777 removing=reset,
776 778 ),
777 779 )
778 780
779 781 printchanges(
780 782 repo.ui, opts, profilecount, includecount, excludecount, *fcounts
781 783 )
782 784
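Note that the three counters computed above are signed net changes, not raw additions: len(new - old) counts rules gained and len(old - new) counts rules lost, so replacing one rule with another nets to zero. For example (hypothetical rule names):

    # Signed count as computed above: one include added, one dropped.
    oldinclude = {b'path:a', b'path:b'}
    newinclude = {b'path:b', b'path:c'}
    includecount = len(newinclude - oldinclude) - len(oldinclude - newinclude)
    assert includecount == 0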
783 785
784 786 def printchanges(
785 787 ui,
786 788 opts,
787 789 profilecount=0,
788 790 includecount=0,
789 791 excludecount=0,
790 792 added=0,
791 793 dropped=0,
792 794 conflicting=0,
793 795 ):
794 796 """Print output summarizing sparse config changes."""
795 797 with ui.formatter(b'sparse', opts) as fm:
796 798 fm.startitem()
797 799 fm.condwrite(
798 800 ui.verbose,
799 801 b'profiles_added',
800 802 _(b'Profiles changed: %d\n'),
801 803 profilecount,
802 804 )
803 805 fm.condwrite(
804 806 ui.verbose,
805 807 b'include_rules_added',
806 808 _(b'Include rules changed: %d\n'),
807 809 includecount,
808 810 )
809 811 fm.condwrite(
810 812 ui.verbose,
811 813 b'exclude_rules_added',
812 814 _(b'Exclude rules changed: %d\n'),
813 815 excludecount,
814 816 )
815 817
816 818 # In 'plain' verbose mode, mergemod.applyupdates already outputs what
817 819 # files are added or removed outside of the templating formatter
818 820 # framework. No point in repeating ourselves in that case.
819 821 if not fm.isplain():
820 822 fm.condwrite(
821 823 ui.verbose, b'files_added', _(b'Files added: %d\n'), added
822 824 )
823 825 fm.condwrite(
824 826 ui.verbose, b'files_dropped', _(b'Files dropped: %d\n'), dropped
825 827 )
826 828 fm.condwrite(
827 829 ui.verbose,
828 830 b'files_conflicting',
829 831 _(b'Files conflicting: %d\n'),
830 832 conflicting,
831 833 )