errors: make StorageError subclass Error, attaching an exit code to it...
Martin von Zweigbergk
r48460:dd339191 default
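A minimal sketch of what this commit buys (not part of the diff below, and assuming the mercurial package at this revision is importable): because StorageError and its subclasses now inherit Error's exit-code plumbing, a storage failure can be caught through the generic Error handler and carries its detailed exit code directly, instead of needing a dedicated except clause:

    from mercurial import error

    try:
        raise error.RevlogError(b'index data corrupt')
    except error.Error as inst:  # StorageError subclasses now reach the generic Error handler
        assert isinstance(inst, error.StorageError)
        assert inst.detailed_exit_code == 50  # attached by the new StorageError.__init__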
@@ -1,685 +1,688 b''
1 1 # error.py - Mercurial exceptions
2 2 #
3 3 # Copyright 2005-2008 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Mercurial exceptions.
9 9
10 10 This allows us to catch exceptions at higher levels without forcing
11 11 imports.
12 12 """
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import difflib
17 17
18 18 # Do not import anything but pycompat here, please
19 19 from . import pycompat
20 20
21 21 if pycompat.TYPE_CHECKING:
22 22 from typing import (
23 23 Any,
24 24 AnyStr,
25 25 Iterable,
26 26 List,
27 27 Optional,
28 28 Sequence,
29 29 Union,
30 30 )
31 31
32 32
33 33 def _tobytes(exc):
34 34 """Byte-stringify exception in the same way as BaseException_str()"""
35 35 if not exc.args:
36 36 return b''
37 37 if len(exc.args) == 1:
38 38 return pycompat.bytestr(exc.args[0])
39 39 return b'(%s)' % b', '.join(b"'%s'" % pycompat.bytestr(a) for a in exc.args)
40 40
41 41
42 42 class Hint(object):
43 43 """Mix-in to provide a hint of an error
44 44
45 45 This should come first in the inheritance list to consume a hint and
46 46 pass remaining arguments to the exception class.
47 47 """
48 48
49 49 def __init__(self, *args, **kw):
50 50 self.hint = kw.pop('hint', None)
51 51 super(Hint, self).__init__(*args, **kw)
52 52
53 53
54 54 class Error(Hint, Exception):
55 55 """Base class for Mercurial errors."""
56 56
57 57 def __init__(
58 58 self, message, hint=None, coarse_exit_code=None, detailed_exit_code=None
59 59 ):
60 60 # type: (bytes, Optional[bytes], Optional[int], Optional[int]) -> None
61 61 self.message = message
62 62 self.hint = hint
63 63 self.coarse_exit_code = coarse_exit_code
64 64 self.detailed_exit_code = detailed_exit_code
65 65 # Pass the message into the Exception constructor to help extensions
66 66 # that look for exc.args[0].
67 67 Exception.__init__(self, message)
68 68
69 69 def __bytes__(self):
70 70 return self.message
71 71
72 72 if pycompat.ispy3:
73 73
74 74 def __str__(self):
75 75 # the output would be unreadable if the message was translated,
76 76 # but do not replace it with encoding.strfromlocal(), which
77 77 # may raise another exception.
78 78 return pycompat.sysstr(self.__bytes__())
79 79
80 80 def format(self):
81 81 # type: () -> bytes
82 82 from .i18n import _
83 83
84 84 message = _(b"abort: %s\n") % self.message
85 85 if self.hint:
86 86 message += _(b"(%s)\n") % self.hint
87 87 return message
88 88
89 89
90 90 class Abort(Error):
91 91 """Raised if a command needs to print an error and exit."""
92 92
93 93
94 class StorageError(Hint, Exception):
94 class StorageError(Error):
95 95 """Raised when an error occurs in a storage layer.
96 96
97 97 Usually subclassed by a storage-specific exception.
98 98 """
99 99
100 __bytes__ = _tobytes
100 def __init__(self, message, hint=None):
101 super(StorageError, self).__init__(
102 message, hint=hint, detailed_exit_code=50
103 )
101 104
102 105
103 106 class RevlogError(StorageError):
104 107 pass
105 108
106 109
107 110 class SidedataHashError(RevlogError):
108 111 def __init__(self, key, expected, got):
109 112 self.hint = None
110 113 self.sidedatakey = key
111 114 self.expecteddigest = expected
112 115 self.actualdigest = got
113 116
114 117
115 118 class FilteredIndexError(IndexError):
116 119 __bytes__ = _tobytes
117 120
118 121
119 122 class LookupError(RevlogError, KeyError):
120 123 def __init__(self, name, index, message):
121 124 self.name = name
122 125 self.index = index
123 126 # this can't be called 'message' because at least some installs of
124 127 # Python 2.6+ complain about the 'message' property being deprecated
125 128 self.lookupmessage = message
126 129 if isinstance(name, bytes) and len(name) == 20:
127 130 from .node import hex
128 131
129 132 name = hex(name)
130 133 # if name is a binary node, it can be None
131 134 RevlogError.__init__(
132 135 self, b'%s@%s: %s' % (index, pycompat.bytestr(name), message)
133 136 )
134 137
135 138 def __bytes__(self):
136 139 return RevlogError.__bytes__(self)
137 140
138 141 def __str__(self):
139 142 return RevlogError.__str__(self)
140 143
141 144
142 145 class AmbiguousPrefixLookupError(LookupError):
143 146 pass
144 147
145 148
146 149 class FilteredLookupError(LookupError):
147 150 pass
148 151
149 152
150 153 class ManifestLookupError(LookupError):
151 154 pass
152 155
153 156
154 157 class CommandError(Exception):
155 158 """Exception raised on errors in parsing the command line."""
156 159
157 160 def __init__(self, command, message):
158 161 # type: (bytes, bytes) -> None
159 162 self.command = command
160 163 self.message = message
161 164 super(CommandError, self).__init__()
162 165
163 166 __bytes__ = _tobytes
164 167
165 168
166 169 class UnknownCommand(Exception):
167 170 """Exception raised if command is not in the command table."""
168 171
169 172 def __init__(self, command, all_commands=None):
170 173 # type: (bytes, Optional[List[bytes]]) -> None
171 174 self.command = command
172 175 self.all_commands = all_commands
173 176 super(UnknownCommand, self).__init__()
174 177
175 178 __bytes__ = _tobytes
176 179
177 180
178 181 class AmbiguousCommand(Exception):
179 182 """Exception raised if command shortcut matches more than one command."""
180 183
181 184 def __init__(self, prefix, matches):
182 185 # type: (bytes, List[bytes]) -> None
183 186 self.prefix = prefix
184 187 self.matches = matches
185 188 super(AmbiguousCommand, self).__init__()
186 189
187 190 __bytes__ = _tobytes
188 191
189 192
190 193 class WorkerError(Exception):
191 194 """Exception raised when a worker process dies."""
192 195
193 196 def __init__(self, status_code):
194 197 # type: (int) -> None
195 198 self.status_code = status_code
196 199 # Pass status code to superclass just so it becomes part of __bytes__
197 200 super(WorkerError, self).__init__(status_code)
198 201
199 202 __bytes__ = _tobytes
200 203
201 204
202 205 class InterventionRequired(Abort):
203 206 """Exception raised when a command requires human intervention."""
204 207
205 208 def __init__(self, message, hint=None):
206 209 super(InterventionRequired, self).__init__(
207 210 message, hint=hint, coarse_exit_code=1, detailed_exit_code=240
208 211 )
209 212
210 213 def format(self):
211 214 # type: () -> bytes
212 215 from .i18n import _
213 216
214 217 message = _(b"%s\n") % self.message
215 218 if self.hint:
216 219 message += _(b"(%s)\n") % self.hint
217 220 return message
218 221
219 222
220 223 class ConflictResolutionRequired(InterventionRequired):
221 224 """Exception raised when a continuable command required merge conflict resolution."""
222 225
223 226 def __init__(self, opname):
224 227 # type: (bytes) -> None
225 228 from .i18n import _
226 229
227 230 self.opname = opname
228 231 InterventionRequired.__init__(
229 232 self,
230 233 _(
231 234 b"unresolved conflicts (see 'hg resolve', then 'hg %s --continue')"
232 235 )
233 236 % opname,
234 237 )
235 238
236 239
237 240 class InputError(Abort):
238 241 """Indicates that the user made an error in their input.
239 242
240 243 Examples: Invalid command, invalid flags, invalid revision.
241 244 """
242 245
243 246 def __init__(self, message, hint=None):
244 247 super(InputError, self).__init__(
245 248 message, hint=hint, detailed_exit_code=10
246 249 )
247 250
248 251
249 252 class StateError(Abort):
250 253 """Indicates that the operation might work if retried in a different state.
251 254
252 255 Examples: Unresolved merge conflicts, unfinished operations.
253 256 """
254 257
255 258 def __init__(self, message, hint=None):
256 259 super(StateError, self).__init__(
257 260 message, hint=hint, detailed_exit_code=20
258 261 )
259 262
260 263
261 264 class CanceledError(Abort):
262 265 """Indicates that the user canceled the operation.
263 266
264 267 Examples: Close commit editor with error status, quit chistedit.
265 268 """
266 269
267 270 def __init__(self, message, hint=None):
268 271 super(CanceledError, self).__init__(
269 272 message, hint=hint, detailed_exit_code=250
270 273 )
271 274
272 275
273 276 class SecurityError(Abort):
274 277 """Indicates that some aspect of security failed.
275 278
276 279 Examples: Bad server credentials, expired local credentials for network
277 280 filesystem, mismatched GPG signature, DoS protection.
278 281 """
279 282
280 283 def __init__(self, message, hint=None):
281 284 super(SecurityError, self).__init__(
282 285 message, hint=hint, detailed_exit_code=150
283 286 )
284 287
285 288
286 289 class HookLoadError(Abort):
287 290 """raised when loading a hook fails, aborting an operation
288 291
289 292 Exists to allow more specialized catching."""
290 293
291 294
292 295 class HookAbort(Abort):
293 296 """raised when a validation hook fails, aborting an operation
294 297
295 298 Exists to allow more specialized catching."""
296 299
297 300 def __init__(self, message, hint=None):
298 301 super(HookAbort, self).__init__(
299 302 message, hint=hint, detailed_exit_code=40
300 303 )
301 304
302 305
303 306 class ConfigError(Abort):
304 307 """Exception raised when parsing config files"""
305 308
306 309 def __init__(self, message, location=None, hint=None):
307 310 # type: (bytes, Optional[bytes], Optional[bytes]) -> None
308 311 super(ConfigError, self).__init__(
309 312 message, hint=hint, detailed_exit_code=30
310 313 )
311 314 self.location = location
312 315
313 316 def format(self):
314 317 # type: () -> bytes
315 318 from .i18n import _
316 319
317 320 if self.location is not None:
318 321 message = _(b"config error at %s: %s\n") % (
319 322 pycompat.bytestr(self.location),
320 323 self.message,
321 324 )
322 325 else:
323 326 message = _(b"config error: %s\n") % self.message
324 327 if self.hint:
325 328 message += _(b"(%s)\n") % self.hint
326 329 return message
327 330
328 331
329 332 class UpdateAbort(Abort):
330 333 """Raised when an update is aborted for destination issue"""
331 334
332 335
333 336 class MergeDestAbort(Abort):
334 337 """Raised when an update is aborted for destination issues"""
335 338
336 339
337 340 class NoMergeDestAbort(MergeDestAbort):
338 341 """Raised when an update is aborted because there is nothing to merge"""
339 342
340 343
341 344 class ManyMergeDestAbort(MergeDestAbort):
342 345 """Raised when an update is aborted because destination is ambiguous"""
343 346
344 347
345 348 class ResponseExpected(Abort):
346 349 """Raised when an EOF is received for a prompt"""
347 350
348 351 def __init__(self):
349 352 from .i18n import _
350 353
351 354 Abort.__init__(self, _(b'response expected'))
352 355
353 356
354 357 class RemoteError(Abort):
355 358 """Exception raised when interacting with a remote repo fails"""
356 359
357 360 def __init__(self, message, hint=None):
358 361 super(RemoteError, self).__init__(
359 362 message, hint=hint, detailed_exit_code=100
360 363 )
361 364
362 365
363 366 class OutOfBandError(RemoteError):
364 367 """Exception raised when a remote repo reports failure"""
365 368
366 369 def __init__(self, message=None, hint=None):
367 370 from .i18n import _
368 371
369 372 if message:
370 373 # Abort.format() adds a trailing newline
371 374 message = _(b"remote error:\n%s") % message.rstrip(b'\n')
372 375 else:
373 376 message = _(b"remote error")
374 377 super(OutOfBandError, self).__init__(message, hint=hint)
375 378
376 379
377 380 class ParseError(Abort):
378 381 """Raised when parsing config files and {rev,file}sets (msg[, pos])"""
379 382
380 383 def __init__(self, message, location=None, hint=None):
381 384 # type: (bytes, Optional[Union[bytes, int]], Optional[bytes]) -> None
382 385 super(ParseError, self).__init__(
383 386 message, hint=hint, detailed_exit_code=10
384 387 )
385 388 self.location = location
386 389
387 390 def format(self):
388 391 # type: () -> bytes
389 392 from .i18n import _
390 393
391 394 if self.location is not None:
392 395 message = _(b"hg: parse error at %s: %s\n") % (
393 396 pycompat.bytestr(self.location),
394 397 self.message,
395 398 )
396 399 else:
397 400 message = _(b"hg: parse error: %s\n") % self.message
398 401 if self.hint:
399 402 message += _(b"(%s)\n") % self.hint
400 403 return message
401 404
402 405
403 406 class PatchError(Exception):
404 407 __bytes__ = _tobytes
405 408
406 409
407 410 def getsimilar(symbols, value):
408 411 # type: (Iterable[bytes], bytes) -> List[bytes]
409 412 sim = lambda x: difflib.SequenceMatcher(None, value, x).ratio()
410 413 # The cutoff for similarity here is pretty arbitrary. It should
411 414 # probably be investigated and tweaked.
412 415 return [s for s in symbols if sim(s) > 0.6]
413 416
414 417
415 418 def similarity_hint(similar):
416 419 # type: (List[bytes]) -> Optional[bytes]
417 420 from .i18n import _
418 421
419 422 if len(similar) == 1:
420 423 return _(b"did you mean %s?") % similar[0]
421 424 elif similar:
422 425 ss = b", ".join(sorted(similar))
423 426 return _(b"did you mean one of %s?") % ss
424 427 else:
425 428 return None
426 429
427 430
428 431 class UnknownIdentifier(ParseError):
429 432 """Exception raised when a {rev,file}set references an unknown identifier"""
430 433
431 434 def __init__(self, function, symbols):
432 435 # type: (bytes, Iterable[bytes]) -> None
433 436 from .i18n import _
434 437
435 438 similar = getsimilar(symbols, function)
436 439 hint = similarity_hint(similar)
437 440
438 441 ParseError.__init__(
439 442 self, _(b"unknown identifier: %s") % function, hint=hint
440 443 )
441 444
442 445
443 446 class RepoError(Hint, Exception):
444 447 __bytes__ = _tobytes
445 448
446 449
447 450 class RepoLookupError(RepoError):
448 451 pass
449 452
450 453
451 454 class FilteredRepoLookupError(RepoLookupError):
452 455 pass
453 456
454 457
455 458 class CapabilityError(RepoError):
456 459 pass
457 460
458 461
459 462 class RequirementError(RepoError):
460 463 """Exception raised if .hg/requires has an unknown entry."""
461 464
462 465
463 466 class StdioError(IOError):
464 467 """Raised if I/O to stdout or stderr fails"""
465 468
466 469 def __init__(self, err):
467 470 # type: (IOError) -> None
468 471 IOError.__init__(self, err.errno, err.strerror)
469 472
470 473 # no __bytes__() because error message is derived from the standard IOError
471 474
472 475
473 476 class UnsupportedMergeRecords(Abort):
474 477 def __init__(self, recordtypes):
475 478 # type: (Iterable[bytes]) -> None
476 479 from .i18n import _
477 480
478 481 self.recordtypes = sorted(recordtypes)
479 482 s = b' '.join(self.recordtypes)
480 483 Abort.__init__(
481 484 self,
482 485 _(b'unsupported merge state records: %s') % s,
483 486 hint=_(
484 487 b'see https://mercurial-scm.org/wiki/MergeStateRecords for '
485 488 b'more information'
486 489 ),
487 490 )
488 491
489 492
490 493 class UnknownVersion(Abort):
491 494 """generic exception for aborting from an encounter with an unknown version"""
492 495
493 496 def __init__(self, msg, hint=None, version=None):
494 497 # type: (bytes, Optional[bytes], Optional[bytes]) -> None
495 498 self.version = version
496 499 super(UnknownVersion, self).__init__(msg, hint=hint)
497 500
498 501
499 502 class LockError(IOError):
500 503 def __init__(self, errno, strerror, filename, desc):
501 504 # TODO: figure out if this should be bytes or str
502 505 # _type: (int, str, str, bytes) -> None
503 506 IOError.__init__(self, errno, strerror, filename)
504 507 self.desc = desc
505 508
506 509 # no __bytes__() because error message is derived from the standard IOError
507 510
508 511
509 512 class LockHeld(LockError):
510 513 def __init__(self, errno, filename, desc, locker):
511 514 LockError.__init__(self, errno, b'Lock held', filename, desc)
512 515 self.locker = locker
513 516
514 517
515 518 class LockUnavailable(LockError):
516 519 pass
517 520
518 521
519 522 # LockError is for errors while acquiring the lock -- this is unrelated
520 523 class LockInheritanceContractViolation(RuntimeError):
521 524 __bytes__ = _tobytes
522 525
523 526
524 527 class ResponseError(Exception):
525 528 """Raised to print an error with part of output and exit."""
526 529
527 530 __bytes__ = _tobytes
528 531
529 532
530 533 # derived from KeyboardInterrupt to simplify some breakout code
531 534 class SignalInterrupt(KeyboardInterrupt):
532 535 """Exception raised on SIGTERM and SIGHUP."""
533 536
534 537
535 538 class SignatureError(Exception):
536 539 __bytes__ = _tobytes
537 540
538 541
539 542 class PushRaced(RuntimeError):
540 543 """An exception raised during unbundling that indicate a push race"""
541 544
542 545 __bytes__ = _tobytes
543 546
544 547
545 548 class ProgrammingError(Hint, RuntimeError):
546 549 """Raised if a mercurial (core or extension) developer made a mistake"""
547 550
548 551 def __init__(self, msg, *args, **kwargs):
549 552 # type: (AnyStr, Any, Any) -> None
550 553 # On Python 3, turn the message back into a string since this is
551 554 # an internal-only error that won't be printed except in
552 555 # stack traces.
553 556 msg = pycompat.sysstr(msg)
554 557 super(ProgrammingError, self).__init__(msg, *args, **kwargs)
555 558
556 559 __bytes__ = _tobytes
557 560
558 561
559 562 class WdirUnsupported(Exception):
560 563 """An exception which is raised when 'wdir()' is not supported"""
561 564
562 565 __bytes__ = _tobytes
563 566
564 567
565 568 # bundle2 related errors
566 569 class BundleValueError(ValueError):
567 570 """error raised when bundle2 cannot be processed"""
568 571
569 572 __bytes__ = _tobytes
570 573
571 574
572 575 class BundleUnknownFeatureError(BundleValueError):
573 576 def __init__(self, parttype=None, params=(), values=()):
574 577 self.parttype = parttype
575 578 self.params = params
576 579 self.values = values
577 580 if self.parttype is None:
578 581 msg = b'Stream Parameter'
579 582 else:
580 583 msg = parttype
581 584 entries = self.params
582 585 if self.params and self.values:
583 586 assert len(self.params) == len(self.values)
584 587 entries = []
585 588 for idx, par in enumerate(self.params):
586 589 val = self.values[idx]
587 590 if val is None:
588 591 entries.append(val)
589 592 else:
590 593 entries.append(b"%s=%r" % (par, pycompat.maybebytestr(val)))
591 594 if entries:
592 595 msg = b'%s - %s' % (msg, b', '.join(entries))
593 596 ValueError.__init__(self, msg) # TODO: convert to str?
594 597
595 598
596 599 class ReadOnlyPartError(RuntimeError):
597 600 """error raised when code tries to alter a part being generated"""
598 601
599 602 __bytes__ = _tobytes
600 603
601 604
602 605 class PushkeyFailed(Abort):
603 606 """error raised when a pushkey part failed to update a value"""
604 607
605 608 def __init__(
606 609 self, partid, namespace=None, key=None, new=None, old=None, ret=None
607 610 ):
608 611 self.partid = partid
609 612 self.namespace = namespace
610 613 self.key = key
611 614 self.new = new
612 615 self.old = old
613 616 self.ret = ret
614 617 # no i18n expected to be processed into a better message
615 618 Abort.__init__(
616 619 self, b'failed to update value for "%s/%s"' % (namespace, key)
617 620 )
618 621
619 622
620 623 class CensoredNodeError(StorageError):
621 624 """error raised when content verification fails on a censored node
622 625
623 626 Also contains the tombstone data substituted for the uncensored data.
624 627 """
625 628
626 629 def __init__(self, filename, node, tombstone):
627 630 # type: (bytes, bytes, bytes) -> None
628 631 from .node import short
629 632
630 633 StorageError.__init__(self, b'%s:%s' % (filename, short(node)))
631 634 self.tombstone = tombstone
632 635
633 636
634 637 class CensoredBaseError(StorageError):
635 638 """error raised when a delta is rejected because its base is censored
636 639
637 640 A delta based on a censored revision must be formed as single patch
638 641 operation which replaces the entire base with new content. This ensures
639 642 the delta may be applied by clones which have not censored the base.
640 643 """
641 644
642 645
643 646 class InvalidBundleSpecification(Exception):
644 647 """error raised when a bundle specification is invalid.
645 648
646 649 This is used for syntax errors as opposed to support errors.
647 650 """
648 651
649 652 __bytes__ = _tobytes
650 653
651 654
652 655 class UnsupportedBundleSpecification(Exception):
653 656 """error raised when a bundle specification is not supported."""
654 657
655 658 __bytes__ = _tobytes
656 659
657 660
658 661 class CorruptedState(Exception):
659 662 """error raised when a command is not able to read its state from file"""
660 663
661 664 __bytes__ = _tobytes
662 665
663 666
664 667 class PeerTransportError(Abort):
665 668 """Transport-level I/O error when communicating with a peer repo."""
666 669
667 670
668 671 class InMemoryMergeConflictsError(Exception):
669 672 """Exception raised when merge conflicts arose during an in-memory merge."""
670 673
671 674 __bytes__ = _tobytes
672 675
673 676
674 677 class WireprotoCommandError(Exception):
675 678 """Represents an error during execution of a wire protocol command.
676 679
677 680 Should only be thrown by wire protocol version 2 commands.
678 681
679 682 The error is a formatter string and an optional iterable of arguments.
680 683 """
681 684
682 685 def __init__(self, message, args=None):
683 686 # type: (bytes, Optional[Sequence[bytes]]) -> None
684 687 self.message = message
685 688 self.messageargs = args
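For reference, a hedged example (again assuming this revision's mercurial package) of how Error.format(), now inherited by the reworked StorageError, renders the message plus optional hint, alongside the exit code the scmutil.py hunk below starts relying on:

    from mercurial import error

    e = error.StorageError(b'revlog corrupt', hint=b'run hg verify')
    print(e.format())            # -> b'abort: revlog corrupt\n(run hg verify)\n'
    print(e.detailed_exit_code)  # -> 50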
@@ -1,2304 +1,2299 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import os
13 13 import posixpath
14 14 import re
15 15 import subprocess
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 bin,
21 21 hex,
22 22 nullrev,
23 23 short,
24 24 wdirrev,
25 25 )
26 26 from .pycompat import getattr
27 27 from .thirdparty import attr
28 28 from . import (
29 29 copies as copiesmod,
30 30 encoding,
31 31 error,
32 32 match as matchmod,
33 33 obsolete,
34 34 obsutil,
35 35 pathutil,
36 36 phases,
37 37 policy,
38 38 pycompat,
39 39 requirements as requirementsmod,
40 40 revsetlang,
41 41 similar,
42 42 smartset,
43 43 url,
44 44 util,
45 45 vfs,
46 46 )
47 47
48 48 from .utils import (
49 49 hashutil,
50 50 procutil,
51 51 stringutil,
52 52 )
53 53
54 54 if pycompat.iswindows:
55 55 from . import scmwindows as scmplatform
56 56 else:
57 57 from . import scmposix as scmplatform
58 58
59 59 parsers = policy.importmod('parsers')
60 60 rustrevlog = policy.importrust('revlog')
61 61
62 62 termsize = scmplatform.termsize
63 63
64 64
65 65 @attr.s(slots=True, repr=False)
66 66 class status(object):
67 67 """Struct with a list of files per status.
68 68
69 69 The 'deleted', 'unknown' and 'ignored' properties are only
70 70 relevant to the working copy.
71 71 """
72 72
73 73 modified = attr.ib(default=attr.Factory(list))
74 74 added = attr.ib(default=attr.Factory(list))
75 75 removed = attr.ib(default=attr.Factory(list))
76 76 deleted = attr.ib(default=attr.Factory(list))
77 77 unknown = attr.ib(default=attr.Factory(list))
78 78 ignored = attr.ib(default=attr.Factory(list))
79 79 clean = attr.ib(default=attr.Factory(list))
80 80
81 81 def __iter__(self):
82 82 yield self.modified
83 83 yield self.added
84 84 yield self.removed
85 85 yield self.deleted
86 86 yield self.unknown
87 87 yield self.ignored
88 88 yield self.clean
89 89
90 90 def __repr__(self):
91 91 return (
92 92 r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
93 93 r'unknown=%s, ignored=%s, clean=%s>'
94 94 ) % tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)
95 95
96 96
97 97 def itersubrepos(ctx1, ctx2):
98 98 """find subrepos in ctx1 or ctx2"""
99 99 # Create a (subpath, ctx) mapping where we prefer subpaths from
100 100 # ctx1. The subpaths from ctx2 are important when the .hgsub file
101 101 # has been modified (in ctx2) but not yet committed (in ctx1).
102 102 subpaths = dict.fromkeys(ctx2.substate, ctx2)
103 103 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
104 104
105 105 missing = set()
106 106
107 107 for subpath in ctx2.substate:
108 108 if subpath not in ctx1.substate:
109 109 del subpaths[subpath]
110 110 missing.add(subpath)
111 111
112 112 for subpath, ctx in sorted(pycompat.iteritems(subpaths)):
113 113 yield subpath, ctx.sub(subpath)
114 114
115 115 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
116 116 # status and diff will have an accurate result when it does
117 117 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
118 118 # against itself.
119 119 for subpath in missing:
120 120 yield subpath, ctx2.nullsub(subpath, ctx1)
121 121
122 122
123 123 def nochangesfound(ui, repo, excluded=None):
124 124 """Report no changes for push/pull, excluded is None or a list of
125 125 nodes excluded from the push/pull.
126 126 """
127 127 secretlist = []
128 128 if excluded:
129 129 for n in excluded:
130 130 ctx = repo[n]
131 131 if ctx.phase() >= phases.secret and not ctx.extinct():
132 132 secretlist.append(n)
133 133
134 134 if secretlist:
135 135 ui.status(
136 136 _(b"no changes found (ignored %d secret changesets)\n")
137 137 % len(secretlist)
138 138 )
139 139 else:
140 140 ui.status(_(b"no changes found\n"))
141 141
142 142
143 143 def callcatch(ui, func):
144 144 """call func() with global exception handling
145 145
146 146 return func() if no exception happens. otherwise do some error handling
147 147 and return an exit code accordingly. does not handle all exceptions.
148 148 """
149 149 coarse_exit_code = -1
150 150 detailed_exit_code = -1
151 151 try:
152 152 try:
153 153 return func()
154 154 except: # re-raises
155 155 ui.traceback()
156 156 raise
157 157 # Global exception handling, alphabetically
158 158 # Mercurial-specific first, followed by built-in and library exceptions
159 159 except error.LockHeld as inst:
160 160 detailed_exit_code = 20
161 161 if inst.errno == errno.ETIMEDOUT:
162 162 reason = _(b'timed out waiting for lock held by %r') % (
163 163 pycompat.bytestr(inst.locker)
164 164 )
165 165 else:
166 166 reason = _(b'lock held by %r') % inst.locker
167 167 ui.error(
168 168 _(b"abort: %s: %s\n")
169 169 % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
170 170 )
171 171 if not inst.locker:
172 172 ui.error(_(b"(lock might be very busy)\n"))
173 173 except error.LockUnavailable as inst:
174 174 detailed_exit_code = 20
175 175 ui.error(
176 176 _(b"abort: could not lock %s: %s\n")
177 177 % (
178 178 inst.desc or stringutil.forcebytestr(inst.filename),
179 179 encoding.strtolocal(inst.strerror),
180 180 )
181 181 )
182 182 except error.RepoError as inst:
183 183 ui.error(_(b"abort: %s\n") % inst)
184 184 if inst.hint:
185 185 ui.error(_(b"(%s)\n") % inst.hint)
186 186 except error.ResponseError as inst:
187 187 ui.error(_(b"abort: %s") % inst.args[0])
188 188 msg = inst.args[1]
189 189 if isinstance(msg, type(u'')):
190 190 msg = pycompat.sysbytes(msg)
191 191 if msg is None:
192 192 ui.error(b"\n")
193 193 elif not isinstance(msg, bytes):
194 194 ui.error(b" %r\n" % (msg,))
195 195 elif not msg:
196 196 ui.error(_(b" empty string\n"))
197 197 else:
198 198 ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
199 199 except error.CensoredNodeError as inst:
200 200 ui.error(_(b"abort: file censored %s\n") % inst)
201 except error.StorageError as inst:
202 ui.error(_(b"abort: %s\n") % inst)
203 if inst.hint:
204 ui.error(_(b"(%s)\n") % inst.hint)
205 detailed_exit_code = 50
206 201 except error.WdirUnsupported:
207 202 ui.error(_(b"abort: working directory revision cannot be specified\n"))
208 203 except error.Error as inst:
209 204 if inst.detailed_exit_code is not None:
210 205 detailed_exit_code = inst.detailed_exit_code
211 206 if inst.coarse_exit_code is not None:
212 207 coarse_exit_code = inst.coarse_exit_code
213 208 ui.error(inst.format())
214 209 except error.WorkerError as inst:
215 210 # Don't print a message -- the worker already should have
216 211 return inst.status_code
217 212 except ImportError as inst:
218 213 ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
219 214 m = stringutil.forcebytestr(inst).split()[-1]
220 215 if m in b"mpatch bdiff".split():
221 216 ui.error(_(b"(did you forget to compile extensions?)\n"))
222 217 elif m in b"zlib".split():
223 218 ui.error(_(b"(is your Python install correct?)\n"))
224 219 except util.urlerr.httperror as inst:
225 220 detailed_exit_code = 100
226 221 ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
227 222 except util.urlerr.urlerror as inst:
228 223 detailed_exit_code = 100
229 224 try: # usually it is in the form (errno, strerror)
230 225 reason = inst.reason.args[1]
231 226 except (AttributeError, IndexError):
232 227 # it might be anything, for example a string
233 228 reason = inst.reason
234 229 if isinstance(reason, pycompat.unicode):
235 230 # SSLError of Python 2.7.9 contains a unicode
236 231 reason = encoding.unitolocal(reason)
237 232 ui.error(_(b"abort: error: %s\n") % stringutil.forcebytestr(reason))
238 233 except (IOError, OSError) as inst:
239 234 if (
240 235 util.safehasattr(inst, b"args")
241 236 and inst.args
242 237 and inst.args[0] == errno.EPIPE
243 238 ):
244 239 pass
245 240 elif getattr(inst, "strerror", None): # common IOError or OSError
246 241 if getattr(inst, "filename", None) is not None:
247 242 ui.error(
248 243 _(b"abort: %s: '%s'\n")
249 244 % (
250 245 encoding.strtolocal(inst.strerror),
251 246 stringutil.forcebytestr(inst.filename),
252 247 )
253 248 )
254 249 else:
255 250 ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
256 251 else: # suspicious IOError
257 252 raise
258 253 except MemoryError:
259 254 ui.error(_(b"abort: out of memory\n"))
260 255 except SystemExit as inst:
261 256 # Commands shouldn't sys.exit directly, but give a return code.
262 257 # Just in case, catch this and pass exit code to caller.
263 258 detailed_exit_code = 254
264 259 coarse_exit_code = inst.code
265 260
266 261 if ui.configbool(b'ui', b'detailed-exit-code'):
267 262 return detailed_exit_code
268 263 else:
269 264 return coarse_exit_code
270 265
271 266
272 267 def checknewlabel(repo, lbl, kind):
273 268 # Do not use the "kind" parameter in ui output.
274 269 # It makes strings difficult to translate.
275 270 if lbl in [b'tip', b'.', b'null']:
276 271 raise error.InputError(_(b"the name '%s' is reserved") % lbl)
277 272 for c in (b':', b'\0', b'\n', b'\r'):
278 273 if c in lbl:
279 274 raise error.InputError(
280 275 _(b"%r cannot be used in a name") % pycompat.bytestr(c)
281 276 )
282 277 try:
283 278 int(lbl)
284 279 raise error.InputError(_(b"cannot use an integer as a name"))
285 280 except ValueError:
286 281 pass
287 282 if lbl.strip() != lbl:
288 283 raise error.InputError(
289 284 _(b"leading or trailing whitespace in name %r") % lbl
290 285 )
291 286
292 287
293 288 def checkfilename(f):
294 289 '''Check that the filename f is an acceptable filename for a tracked file'''
295 290 if b'\r' in f or b'\n' in f:
296 291 raise error.InputError(
297 292 _(b"'\\n' and '\\r' disallowed in filenames: %r")
298 293 % pycompat.bytestr(f)
299 294 )
300 295
301 296
302 297 def checkportable(ui, f):
303 298 '''Check if filename f is portable and warn or abort depending on config'''
304 299 checkfilename(f)
305 300 abort, warn = checkportabilityalert(ui)
306 301 if abort or warn:
307 302 msg = util.checkwinfilename(f)
308 303 if msg:
309 304 msg = b"%s: %s" % (msg, procutil.shellquote(f))
310 305 if abort:
311 306 raise error.InputError(msg)
312 307 ui.warn(_(b"warning: %s\n") % msg)
313 308
314 309
315 310 def checkportabilityalert(ui):
316 311 """check if the user's config requests nothing, a warning, or abort for
317 312 non-portable filenames"""
318 313 val = ui.config(b'ui', b'portablefilenames')
319 314 lval = val.lower()
320 315 bval = stringutil.parsebool(val)
321 316 abort = pycompat.iswindows or lval == b'abort'
322 317 warn = bval or lval == b'warn'
323 318 if bval is None and not (warn or abort or lval == b'ignore'):
324 319 raise error.ConfigError(
325 320 _(b"ui.portablefilenames value is invalid ('%s')") % val
326 321 )
327 322 return abort, warn
328 323
329 324
330 325 class casecollisionauditor(object):
331 326 def __init__(self, ui, abort, dirstate):
332 327 self._ui = ui
333 328 self._abort = abort
334 329 allfiles = b'\0'.join(dirstate)
335 330 self._loweredfiles = set(encoding.lower(allfiles).split(b'\0'))
336 331 self._dirstate = dirstate
337 332 # The purpose of _newfiles is so that we don't complain about
338 333 # case collisions if someone were to call this object with the
339 334 # same filename twice.
340 335 self._newfiles = set()
341 336
342 337 def __call__(self, f):
343 338 if f in self._newfiles:
344 339 return
345 340 fl = encoding.lower(f)
346 341 if fl in self._loweredfiles and f not in self._dirstate:
347 342 msg = _(b'possible case-folding collision for %s') % f
348 343 if self._abort:
349 344 raise error.Abort(msg)
350 345 self._ui.warn(_(b"warning: %s\n") % msg)
351 346 self._loweredfiles.add(fl)
352 347 self._newfiles.add(f)
353 348
354 349
355 350 def filteredhash(repo, maxrev):
356 351 """build hash of filtered revisions in the current repoview.
357 352
358 353 Multiple caches perform up-to-date validation by checking that the
359 354 tiprev and tipnode stored in the cache file match the current repository.
360 355 However, this is not sufficient for validating repoviews because the set
361 356 of revisions in the view may change without the repository tiprev and
362 357 tipnode changing.
363 358
364 359 This function hashes all the revs filtered from the view and returns
365 360 that SHA-1 digest.
366 361 """
367 362 cl = repo.changelog
368 363 if not cl.filteredrevs:
369 364 return None
370 365 key = cl._filteredrevs_hashcache.get(maxrev)
371 366 if not key:
372 367 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
373 368 if revs:
374 369 s = hashutil.sha1()
375 370 for rev in revs:
376 371 s.update(b'%d;' % rev)
377 372 key = s.digest()
378 373 cl._filteredrevs_hashcache[maxrev] = key
379 374 return key
380 375
381 376
382 377 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
383 378 """yield every hg repository under path, always recursively.
384 379 The recurse flag will only control recursion into repo working dirs"""
385 380
386 381 def errhandler(err):
387 382 if err.filename == path:
388 383 raise err
389 384
390 385 samestat = getattr(os.path, 'samestat', None)
391 386 if followsym and samestat is not None:
392 387
393 388 def adddir(dirlst, dirname):
394 389 dirstat = os.stat(dirname)
395 390 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
396 391 if not match:
397 392 dirlst.append(dirstat)
398 393 return not match
399 394
400 395 else:
401 396 followsym = False
402 397
403 398 if (seen_dirs is None) and followsym:
404 399 seen_dirs = []
405 400 adddir(seen_dirs, path)
406 401 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
407 402 dirs.sort()
408 403 if b'.hg' in dirs:
409 404 yield root # found a repository
410 405 qroot = os.path.join(root, b'.hg', b'patches')
411 406 if os.path.isdir(os.path.join(qroot, b'.hg')):
412 407 yield qroot # we have a patch queue repo here
413 408 if recurse:
414 409 # avoid recursing inside the .hg directory
415 410 dirs.remove(b'.hg')
416 411 else:
417 412 dirs[:] = [] # don't descend further
418 413 elif followsym:
419 414 newdirs = []
420 415 for d in dirs:
421 416 fname = os.path.join(root, d)
422 417 if adddir(seen_dirs, fname):
423 418 if os.path.islink(fname):
424 419 for hgname in walkrepos(fname, True, seen_dirs):
425 420 yield hgname
426 421 else:
427 422 newdirs.append(d)
428 423 dirs[:] = newdirs
429 424
430 425
431 426 def binnode(ctx):
432 427 """Return binary node id for a given basectx"""
433 428 node = ctx.node()
434 429 if node is None:
435 430 return ctx.repo().nodeconstants.wdirid
436 431 return node
437 432
438 433
439 434 def intrev(ctx):
440 435 """Return integer for a given basectx that can be used in comparison or
441 436 arithmetic operation"""
442 437 rev = ctx.rev()
443 438 if rev is None:
444 439 return wdirrev
445 440 return rev
446 441
447 442
448 443 def formatchangeid(ctx):
449 444 """Format changectx as '{rev}:{node|formatnode}', which is the default
450 445 template provided by logcmdutil.changesettemplater"""
451 446 repo = ctx.repo()
452 447 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
453 448
454 449
455 450 def formatrevnode(ui, rev, node):
456 451 """Format given revision and node depending on the current verbosity"""
457 452 if ui.debugflag:
458 453 hexfunc = hex
459 454 else:
460 455 hexfunc = short
461 456 return b'%d:%s' % (rev, hexfunc(node))
462 457
463 458
464 459 def resolvehexnodeidprefix(repo, prefix):
465 460 if prefix.startswith(b'x'):
466 461 prefix = prefix[1:]
467 462 try:
468 463 # Uses unfiltered repo because it's faster when prefix is ambiguous.
469 464 # This matches the shortesthexnodeidprefix() function below.
470 465 node = repo.unfiltered().changelog._partialmatch(prefix)
471 466 except error.AmbiguousPrefixLookupError:
472 467 revset = repo.ui.config(
473 468 b'experimental', b'revisions.disambiguatewithin'
474 469 )
475 470 if revset:
476 471 # Clear config to avoid infinite recursion
477 472 configoverrides = {
478 473 (b'experimental', b'revisions.disambiguatewithin'): None
479 474 }
480 475 with repo.ui.configoverride(configoverrides):
481 476 revs = repo.anyrevs([revset], user=True)
482 477 matches = []
483 478 for rev in revs:
484 479 node = repo.changelog.node(rev)
485 480 if hex(node).startswith(prefix):
486 481 matches.append(node)
487 482 if len(matches) == 1:
488 483 return matches[0]
489 484 raise
490 485 if node is None:
491 486 return
492 487 repo.changelog.rev(node) # make sure node isn't filtered
493 488 return node
494 489
495 490
496 491 def mayberevnum(repo, prefix):
497 492 """Checks if the given prefix may be mistaken for a revision number"""
498 493 try:
499 494 i = int(prefix)
500 495 # if we are a pure int, then starting with zero will not be
501 496 # confused as a rev; or, obviously, if the int is larger
502 497 # than the value of the tip rev. We still need to disambiguate if
503 498 # prefix == '0', since that *is* a valid revnum.
504 499 if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
505 500 return False
506 501 return True
507 502 except ValueError:
508 503 return False
509 504
510 505
511 506 def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
512 507 """Find the shortest unambiguous prefix that matches hexnode.
513 508
514 509 If "cache" is not None, it must be a dictionary that can be used for
515 510 caching between calls to this method.
516 511 """
517 512 # _partialmatch() of filtered changelog could take O(len(repo)) time,
518 513 # which would be unacceptably slow, so we look for hash collisions in
519 514 # unfiltered space, which means some hashes may be slightly longer.
520 515
521 516 minlength = max(minlength, 1)
522 517
523 518 def disambiguate(prefix):
524 519 """Disambiguate against revnums."""
525 520 if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'):
526 521 if mayberevnum(repo, prefix):
527 522 return b'x' + prefix
528 523 else:
529 524 return prefix
530 525
531 526 hexnode = hex(node)
532 527 for length in range(len(prefix), len(hexnode) + 1):
533 528 prefix = hexnode[:length]
534 529 if not mayberevnum(repo, prefix):
535 530 return prefix
536 531
537 532 cl = repo.unfiltered().changelog
538 533 revset = repo.ui.config(b'experimental', b'revisions.disambiguatewithin')
539 534 if revset:
540 535 revs = None
541 536 if cache is not None:
542 537 revs = cache.get(b'disambiguationrevset')
543 538 if revs is None:
544 539 revs = repo.anyrevs([revset], user=True)
545 540 if cache is not None:
546 541 cache[b'disambiguationrevset'] = revs
547 542 if cl.rev(node) in revs:
548 543 hexnode = hex(node)
549 544 nodetree = None
550 545 if cache is not None:
551 546 nodetree = cache.get(b'disambiguationnodetree')
552 547 if not nodetree:
553 548 if util.safehasattr(parsers, 'nodetree'):
554 549 # The CExt is the only implementation to provide a nodetree
555 550 # class so far.
556 551 index = cl.index
557 552 if util.safehasattr(index, 'get_cindex'):
558 553 # the rust wrapper needs to give access to its internal index
559 554 index = index.get_cindex()
560 555 nodetree = parsers.nodetree(index, len(revs))
561 556 for r in revs:
562 557 nodetree.insert(r)
563 558 if cache is not None:
564 559 cache[b'disambiguationnodetree'] = nodetree
565 560 if nodetree is not None:
566 561 length = max(nodetree.shortest(node), minlength)
567 562 prefix = hexnode[:length]
568 563 return disambiguate(prefix)
569 564 for length in range(minlength, len(hexnode) + 1):
570 565 matches = []
571 566 prefix = hexnode[:length]
572 567 for rev in revs:
573 568 otherhexnode = repo[rev].hex()
574 569 if prefix == otherhexnode[:length]:
575 570 matches.append(otherhexnode)
576 571 if len(matches) == 1:
577 572 return disambiguate(prefix)
578 573
579 574 try:
580 575 return disambiguate(cl.shortest(node, minlength))
581 576 except error.LookupError:
582 577 raise error.RepoLookupError()
583 578
584 579
585 580 def isrevsymbol(repo, symbol):
586 581 """Checks if a symbol exists in the repo.
587 582
588 583 See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
589 584 symbol is an ambiguous nodeid prefix.
590 585 """
591 586 try:
592 587 revsymbol(repo, symbol)
593 588 return True
594 589 except error.RepoLookupError:
595 590 return False
596 591
597 592
598 593 def revsymbol(repo, symbol):
599 594 """Returns a context given a single revision symbol (as string).
600 595
601 596 This is similar to revsingle(), but accepts only a single revision symbol,
602 597 i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
603 598 not "max(public())".
604 599 """
605 600 if not isinstance(symbol, bytes):
606 601 msg = (
607 602 b"symbol (%s of type %s) was not a string, did you mean "
608 603 b"repo[symbol]?" % (symbol, type(symbol))
609 604 )
610 605 raise error.ProgrammingError(msg)
611 606 try:
612 607 if symbol in (b'.', b'tip', b'null'):
613 608 return repo[symbol]
614 609
615 610 try:
616 611 r = int(symbol)
617 612 if b'%d' % r != symbol:
618 613 raise ValueError
619 614 l = len(repo.changelog)
620 615 if r < 0:
621 616 r += l
622 617 if r < 0 or r >= l and r != wdirrev:
623 618 raise ValueError
624 619 return repo[r]
625 620 except error.FilteredIndexError:
626 621 raise
627 622 except (ValueError, OverflowError, IndexError):
628 623 pass
629 624
630 625 if len(symbol) == 2 * repo.nodeconstants.nodelen:
631 626 try:
632 627 node = bin(symbol)
633 628 rev = repo.changelog.rev(node)
634 629 return repo[rev]
635 630 except error.FilteredLookupError:
636 631 raise
637 632 except (TypeError, LookupError):
638 633 pass
639 634
640 635 # look up bookmarks through the name interface
641 636 try:
642 637 node = repo.names.singlenode(repo, symbol)
643 638 rev = repo.changelog.rev(node)
644 639 return repo[rev]
645 640 except KeyError:
646 641 pass
647 642
648 643 node = resolvehexnodeidprefix(repo, symbol)
649 644 if node is not None:
650 645 rev = repo.changelog.rev(node)
651 646 return repo[rev]
652 647
653 648 raise error.RepoLookupError(_(b"unknown revision '%s'") % symbol)
654 649
655 650 except error.WdirUnsupported:
656 651 return repo[None]
657 652 except (
658 653 error.FilteredIndexError,
659 654 error.FilteredLookupError,
660 655 error.FilteredRepoLookupError,
661 656 ):
662 657 raise _filterederror(repo, symbol)
663 658
664 659
665 660 def _filterederror(repo, changeid):
666 661 """build an exception to be raised about a filtered changeid
667 662
668 663 This is extracted in a function to help extensions (eg: evolve) to
669 664 experiment with various message variants."""
670 665 if repo.filtername.startswith(b'visible'):
671 666
672 667 # Check if the changeset is obsolete
673 668 unfilteredrepo = repo.unfiltered()
674 669 ctx = revsymbol(unfilteredrepo, changeid)
675 670
676 671 # If the changeset is obsolete, enrich the message with the reason
677 672 # that made this changeset not visible
678 673 if ctx.obsolete():
679 674 msg = obsutil._getfilteredreason(repo, changeid, ctx)
680 675 else:
681 676 msg = _(b"hidden revision '%s'") % changeid
682 677
683 678 hint = _(b'use --hidden to access hidden revisions')
684 679
685 680 return error.FilteredRepoLookupError(msg, hint=hint)
686 681 msg = _(b"filtered revision '%s' (not in '%s' subset)")
687 682 msg %= (changeid, repo.filtername)
688 683 return error.FilteredRepoLookupError(msg)
689 684
690 685
691 686 def revsingle(repo, revspec, default=b'.', localalias=None):
692 687 if not revspec and revspec != 0:
693 688 return repo[default]
694 689
695 690 l = revrange(repo, [revspec], localalias=localalias)
696 691 if not l:
697 692 raise error.Abort(_(b'empty revision set'))
698 693 return repo[l.last()]
699 694
700 695
701 696 def _pairspec(revspec):
702 697 tree = revsetlang.parse(revspec)
703 698 return tree and tree[0] in (
704 699 b'range',
705 700 b'rangepre',
706 701 b'rangepost',
707 702 b'rangeall',
708 703 )
709 704
710 705
711 706 def revpair(repo, revs):
712 707 if not revs:
713 708 return repo[b'.'], repo[None]
714 709
715 710 l = revrange(repo, revs)
716 711
717 712 if not l:
718 713 raise error.Abort(_(b'empty revision range'))
719 714
720 715 first = l.first()
721 716 second = l.last()
722 717
723 718 if (
724 719 first == second
725 720 and len(revs) >= 2
726 721 and not all(revrange(repo, [r]) for r in revs)
727 722 ):
728 723 raise error.Abort(_(b'empty revision on one side of range'))
729 724
730 725 # if top-level is range expression, the result must always be a pair
731 726 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
732 727 return repo[first], repo[None]
733 728
734 729 return repo[first], repo[second]
735 730
736 731
737 732 def revrange(repo, specs, localalias=None):
738 733 """Execute 1 to many revsets and return the union.
739 734
740 735 This is the preferred mechanism for executing revsets using user-specified
741 736 config options, such as revset aliases.
742 737
743 738 The revsets specified by ``specs`` will be executed via a chained ``OR``
744 739 expression. If ``specs`` is empty, an empty result is returned.
745 740
746 741 ``specs`` can contain integers, in which case they are assumed to be
747 742 revision numbers.
748 743
749 744 It is assumed the revsets are already formatted. If you have arguments
750 745 that need to be expanded in the revset, call ``revsetlang.formatspec()``
751 746 and pass the result as an element of ``specs``.
752 747
753 748 Specifying a single revset is allowed.
754 749
755 750 Returns a ``smartset.abstractsmartset`` which is a list-like interface over
756 751 integer revisions.
757 752 """
758 753 allspecs = []
759 754 for spec in specs:
760 755 if isinstance(spec, int):
761 756 spec = revsetlang.formatspec(b'%d', spec)
762 757 allspecs.append(spec)
763 758 return repo.anyrevs(allspecs, user=True, localalias=localalias)
764 759
765 760
766 761 def increasingwindows(windowsize=8, sizelimit=512):
767 762 while True:
768 763 yield windowsize
769 764 if windowsize < sizelimit:
770 765 windowsize *= 2
771 766
772 767
773 768 def walkchangerevs(repo, revs, makefilematcher, prepare):
774 769 """Iterate over files and the revs in a "windowed" way.
775 770
776 771 Callers most commonly need to iterate backwards over the history
777 772 in which they are interested. Doing so has awful (quadratic-looking)
778 773 performance, so we use iterators in a "windowed" way.
779 774
780 775 We walk a window of revisions in the desired order. Within the
781 776 window, we first walk forwards to gather data, then in the desired
782 777 order (usually backwards) to display it.
783 778
784 779 This function returns an iterator yielding contexts. Before
785 780 yielding each context, the iterator will first call the prepare
786 781 function on each context in the window in forward order."""
787 782
788 783 if not revs:
789 784 return []
790 785 change = repo.__getitem__
791 786
792 787 def iterate():
793 788 it = iter(revs)
794 789 stopiteration = False
795 790 for windowsize in increasingwindows():
796 791 nrevs = []
797 792 for i in pycompat.xrange(windowsize):
798 793 rev = next(it, None)
799 794 if rev is None:
800 795 stopiteration = True
801 796 break
802 797 nrevs.append(rev)
803 798 for rev in sorted(nrevs):
804 799 ctx = change(rev)
805 800 prepare(ctx, makefilematcher(ctx))
806 801 for rev in nrevs:
807 802 yield change(rev)
808 803
809 804 if stopiteration:
810 805 break
811 806
812 807 return iterate()
813 808
814 809
815 810 def meaningfulparents(repo, ctx):
816 811 """Return list of meaningful (or all if debug) parentrevs for rev.
817 812
818 813 For merges (two non-nullrev revisions) both parents are meaningful.
819 814 Otherwise the first parent revision is considered meaningful if it
820 815 is not the preceding revision.
821 816 """
822 817 parents = ctx.parents()
823 818 if len(parents) > 1:
824 819 return parents
825 820 if repo.ui.debugflag:
826 821 return [parents[0], repo[nullrev]]
827 822 if parents[0].rev() >= intrev(ctx) - 1:
828 823 return []
829 824 return parents
830 825
831 826
832 827 def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
833 828 """Return a function that produced paths for presenting to the user.
834 829
835 830 The returned function takes a repo-relative path and produces a path
836 831 that can be presented in the UI.
837 832
838 833 Depending on the value of ui.relative-paths, either a repo-relative or
839 834 cwd-relative path will be produced.
840 835
841 836 legacyrelativevalue is the value to use if ui.relative-paths=legacy
842 837
843 838 If forcerelativevalue is not None, then that value will be used regardless
844 839 of what ui.relative-paths is set to.
845 840 """
846 841 if forcerelativevalue is not None:
847 842 relative = forcerelativevalue
848 843 else:
849 844 config = repo.ui.config(b'ui', b'relative-paths')
850 845 if config == b'legacy':
851 846 relative = legacyrelativevalue
852 847 else:
853 848 relative = stringutil.parsebool(config)
854 849 if relative is None:
855 850 raise error.ConfigError(
856 851 _(b"ui.relative-paths is not a boolean ('%s')") % config
857 852 )
858 853
859 854 if relative:
860 855 cwd = repo.getcwd()
861 856 if cwd != b'':
862 857 # this branch would work even if cwd == b'' (ie cwd = repo
863 858 # root), but its generality makes the returned function slower
864 859 pathto = repo.pathto
865 860 return lambda f: pathto(f, cwd)
866 861 if repo.ui.configbool(b'ui', b'slash'):
867 862 return lambda f: f
868 863 else:
869 864 return util.localpath
870 865
871 866
872 867 def subdiruipathfn(subpath, uipathfn):
873 868 '''Create a new uipathfn that treats the file as relative to subpath.'''
874 869 return lambda f: uipathfn(posixpath.join(subpath, f))
875 870
876 871
877 872 def anypats(pats, opts):
878 873 """Checks if any patterns, including --include and --exclude were given.
879 874
880 875 Some commands (e.g. addremove) use this condition for deciding whether to
881 876 print absolute or relative paths.
882 877 """
883 878 return bool(pats or opts.get(b'include') or opts.get(b'exclude'))
884 879
885 880
886 881 def expandpats(pats):
887 882 """Expand bare globs when running on windows.
888 883 On posix we assume it has already been done by sh."""
889 884 if not util.expandglobs:
890 885 return list(pats)
891 886 ret = []
892 887 for kindpat in pats:
893 888 kind, pat = matchmod._patsplit(kindpat, None)
894 889 if kind is None:
895 890 try:
896 891 globbed = glob.glob(pat)
897 892 except re.error:
898 893 globbed = [pat]
899 894 if globbed:
900 895 ret.extend(globbed)
901 896 continue
902 897 ret.append(kindpat)
903 898 return ret
904 899
905 900
906 901 def matchandpats(
907 902 ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
908 903 ):
909 904 """Return a matcher and the patterns that were used.
910 905 The matcher will warn about bad matches, unless an alternate badfn callback
911 906 is provided."""
912 907 if opts is None:
913 908 opts = {}
914 909 if not globbed and default == b'relpath':
915 910 pats = expandpats(pats or [])
916 911
917 912 uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)
918 913
919 914 def bad(f, msg):
920 915 ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))
921 916
922 917 if badfn is None:
923 918 badfn = bad
924 919
925 920 m = ctx.match(
926 921 pats,
927 922 opts.get(b'include'),
928 923 opts.get(b'exclude'),
929 924 default,
930 925 listsubrepos=opts.get(b'subrepos'),
931 926 badfn=badfn,
932 927 )
933 928
934 929 if m.always():
935 930 pats = []
936 931 return m, pats
937 932
938 933
939 934 def match(
940 935 ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
941 936 ):
942 937 '''Return a matcher that will warn about bad matches.'''
943 938 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
944 939
945 940
946 941 def matchall(repo):
947 942 '''Return a matcher that will efficiently match everything.'''
948 943 return matchmod.always()
949 944
950 945
951 946 def matchfiles(repo, files, badfn=None):
952 947 '''Return a matcher that will efficiently match exactly these files.'''
953 948 return matchmod.exact(files, badfn=badfn)
954 949
955 950
956 951 def parsefollowlinespattern(repo, rev, pat, msg):
957 952 """Return a file name from `pat` pattern suitable for usage in followlines
958 953 logic.
959 954 """
960 955 if not matchmod.patkind(pat):
961 956 return pathutil.canonpath(repo.root, repo.getcwd(), pat)
962 957 else:
963 958 ctx = repo[rev]
964 959 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
965 960 files = [f for f in ctx if m(f)]
966 961 if len(files) != 1:
967 962 raise error.ParseError(msg)
968 963 return files[0]
969 964
970 965
971 966 def getorigvfs(ui, repo):
972 967 """return a vfs suitable to save 'orig' file
973 968
974 969 return None if no special directory is configured"""
975 970 origbackuppath = ui.config(b'ui', b'origbackuppath')
976 971 if not origbackuppath:
977 972 return None
978 973 return vfs.vfs(repo.wvfs.join(origbackuppath))
979 974
980 975
981 976 def backuppath(ui, repo, filepath):
982 977 """customize where working copy backup files (.orig files) are created
983 978
984 979 Fetch user defined path from config file: [ui] origbackuppath = <path>
985 980 Fall back to default (filepath with .orig suffix) if not specified
986 981
987 982 filepath is repo-relative
988 983
989 984 Returns an absolute path
990 985 """
991 986 origvfs = getorigvfs(ui, repo)
992 987 if origvfs is None:
993 988 return repo.wjoin(filepath + b".orig")
994 989
995 990 origbackupdir = origvfs.dirname(filepath)
996 991 if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
997 992 ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))
998 993
999 994 # Remove any files that conflict with the backup file's path
1000 995 for f in reversed(list(pathutil.finddirs(filepath))):
1001 996 if origvfs.isfileorlink(f):
1002 997 ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(f))
1003 998 origvfs.unlink(f)
1004 999 break
1005 1000
1006 1001 origvfs.makedirs(origbackupdir)
1007 1002
1008 1003 if origvfs.isdir(filepath) and not origvfs.islink(filepath):
1009 1004 ui.note(
1010 1005 _(b'removing conflicting directory: %s\n') % origvfs.join(filepath)
1011 1006 )
1012 1007 origvfs.rmtree(filepath, forcibly=True)
1013 1008
1014 1009 return origvfs.join(filepath)
1015 1010
1016 1011
1017 1012 class _containsnode(object):
1018 1013 """proxy __contains__(node) to container.__contains__ which accepts revs"""
1019 1014
1020 1015 def __init__(self, repo, revcontainer):
1021 1016 self._torev = repo.changelog.rev
1022 1017 self._revcontains = revcontainer.__contains__
1023 1018
1024 1019 def __contains__(self, node):
1025 1020 return self._revcontains(self._torev(node))
1026 1021
1027 1022
1028 1023 def cleanupnodes(
1029 1024 repo,
1030 1025 replacements,
1031 1026 operation,
1032 1027 moves=None,
1033 1028 metadata=None,
1034 1029 fixphase=False,
1035 1030 targetphase=None,
1036 1031 backup=True,
1037 1032 ):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is a dictionary containing metadata to be stored in obsmarkers if
    obsolescence is enabled.
    """
    assert fixphase or targetphase is None
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, b'items'):
        replacements = {(n,): () for n in replacements}
    else:
        # upgrading non tuple "source" to tuple ones for BC
        repls = {}
        for key, value in replacements.items():
            if not isinstance(key, tuple):
                key = (key,)
            repls[key] = value
        replacements = repls

    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    for oldnodes, newnodes in replacements.items():
        for oldnode in oldnodes:
            if oldnode in moves:
                continue
            if len(newnodes) > 1:
                # usually a split, take the one with biggest rev number
                newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
            elif len(newnodes) == 0:
                # move bookmark backwards
                allreplaced = []
                for rep in replacements:
                    allreplaced.extend(rep)
                roots = list(
                    unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
                )
                if roots:
                    newnode = roots[0].node()
                else:
                    newnode = repo.nullid
            else:
                newnode = newnodes[0]
            moves[oldnode] = newnode

    allnewnodes = [n for ns in replacements.values() for n in ns]
    toretract = {}
    toadvance = {}
    if fixphase:
        precursors = {}
        for oldnodes, newnodes in replacements.items():
            for oldnode in oldnodes:
                for newnode in newnodes:
                    precursors.setdefault(newnode, []).append(oldnode)

        allnewnodes.sort(key=lambda n: unfi[n].rev())
        newphases = {}

        def phase(ctx):
            return newphases.get(ctx.node(), ctx.phase())

        for newnode in allnewnodes:
            ctx = unfi[newnode]
            parentphase = max(phase(p) for p in ctx.parents())
            if targetphase is None:
                oldphase = max(
                    unfi[oldnode].phase() for oldnode in precursors[newnode]
                )
                newphase = max(oldphase, parentphase)
            else:
                newphase = max(targetphase, parentphase)
            newphases[newnode] = newphase
            if newphase > ctx.phase():
                toretract.setdefault(newphase, []).append(newnode)
            elif newphase < ctx.phase():
                toadvance.setdefault(newphase, []).append(newnode)

    with repo.transaction(b'cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks  # avoid import cycle

            repo.ui.debug(
                b'moving bookmarks %r from %s to %s\n'
                % (
                    pycompat.rapply(pycompat.maybebytestr, oldbmarks),
                    hex(oldnode),
                    hex(newnode),
                )
            )
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs(
                b'parents(roots(%ln & (::%n))) - parents(%n)',
                allnewnodes,
                newnode,
                oldnode,
            )
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        for phase, nodes in toretract.items():
            phases.retractboundary(repo, tr, phase, nodes)
        for phase, nodes in toadvance.items():
            phases.advanceboundary(repo, tr, phase, nodes)

        mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obsolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the nodes in topological order; that might be useful
            # for some obsstore logic.
            # NOTE: the sorting might belong to createmarkers.
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0][0])
            rels = []
            for ns, s in sorted(replacements.items(), key=sortfunc):
                rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
                rels.append(rel)
            if rels:
                obsolete.createmarkers(
                    repo, rels, operation=operation, metadata=metadata
                )
        elif phases.supportinternal(repo) and mayusearchived:
            # this assumes we do not have "unstable" nodes above the cleaned ones
            allreplaced = set()
            for ns in replacements.keys():
                allreplaced.update(ns)
            if backup:
                from . import repair  # avoid import cycle

                node = min(allreplaced, key=repo.changelog.rev)
                repair.backupbundle(
                    repo, allreplaced, allreplaced, node, operation
                )
            phases.retractboundary(repo, tr, phases.archived, allreplaced)
        else:
            from . import repair  # avoid import cycle

            tostrip = list(n for ns in replacements for n in ns)
            if tostrip:
                repair.delayedstrip(
                    repo.ui, repo, tostrip, operation, backup=backup
                )

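# Illustrative sketch (hypothetical caller, assumed nodes 'old' and 'new'):
# a history-rewriting command would typically hand its results to
# cleanupnodes() like this:
#
#   replacements = {(old,): (new,)}
#   cleanupnodes(repo, replacements, b'my-rewrite', fixphase=True)
#
# Depending on the repository configuration, this records obsmarkers,
# archives, or strips the old nodes, and moves bookmarks from old to new.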

def addremove(repo, matcher, prefix, uipathfn, opts=None):
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get(b'dry_run')
    try:
        similarity = float(opts.get(b'similarity') or 0)
    except ValueError:
        raise error.Abort(_(b'similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.Abort(_(b'similarity must be between 0 and 100'))
    similarity /= 100.0

    ret = 0

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = subdiruipathfn(subpath, uipathfn)
            try:
                if sub.addremove(submatch, subprefix, subuipathfn, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )

    rejected = []

    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(
        repo, badmatch
    )

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _(b'adding %s\n') % uipathfn(abs)
                label = b'ui.addremove.added'
            else:
                status = _(b'removing %s\n') % uipathfn(abs)
                label = b'ui.addremove.removed'
            repo.ui.status(status, label=label)

    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret


def marktouched(repo, files, similarity=0.0):
    """Assert that files have somehow been operated upon. files are relative to
    the repo root."""
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _(b'adding %s\n') % abs
            else:
                status = _(b'removing %s\n') % abs
            repo.ui.status(status)

    # TODO: We should probably have the caller pass in uipathfn and apply it to
    # the messages above too. legacyrelativevalue=True is consistent with how
    # it used to work.
    uipathfn = getuipathfn(repo, legacyrelativevalue=True)
    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0


def _interestingfiles(repo, matcher):
    """Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean."""
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    matcher = repo.narrowmatch(matcher, includeexact=True)
    walkresults = dirstate.walk(
        matcher,
        subrepos=sorted(ctx.substate),
        unknown=True,
        ignored=False,
        full=False,
    )
    for abs, st in pycompat.iteritems(walkresults):
        dstate = dirstate[abs]
        if dstate == b'?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != b'r' and not st:
            deleted.append(abs)
        elif dstate == b'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == b'r' and not st:
            removed.append(abs)
        elif dstate == b'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten

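# For reference (summary of the mapping above, not additional behavior):
#
#   b'?' and passes the path audit      -> unknown   (candidate for add)
#   tracked, not b'r', missing on disk  -> deleted
#   b'r' but present on disk            -> forgotten
#   b'r' and missing on disk            -> removed   (rename detection)
#   b'a'                                -> added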

def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(
            repo, added, removed, similarity
        ):
            if (
                repo.ui.verbose
                or not matcher.exact(old)
                or not matcher.exact(new)
            ):
                repo.ui.status(
                    _(
                        b'recording removal of %s as rename to %s '
                        b'(%d%% similar)\n'
                    )
                    % (uipathfn(old), uipathfn(new), score * 100)
                )
            renames[new] = old
    return renames


def _markchanges(repo, unknown, deleted, renames):
    """Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied."""
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in pycompat.iteritems(renames):
            wctx.copy(old, new)


def getrenamedfn(repo, endrev=None):
    if copiesmod.usechangesetcentricalgo(repo):

        def getrenamed(fn, rev):
            ctx = repo[rev]
            p1copies = ctx.p1copies()
            if fn in p1copies:
                return p1copies[fn]
            p2copies = ctx.p2copies()
            if fn in p2copies:
                return p2copies[fn]
            return None

        return getrenamed

    rcache = {}
    if endrev is None:
        endrev = len(repo)

    def getrenamed(fn, rev):
        """looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev."""
        if fn not in rcache:
            rcache[fn] = {}
            fl = repo.file(fn)
            for i in fl:
                lr = fl.linkrev(i)
                renamed = fl.renamed(fl.node(i))
                rcache[fn][lr] = renamed and renamed[0]
                if lr >= endrev:
                    break
        if rev in rcache[fn]:
            return rcache[fn][rev]

        # If linkrev != rev (i.e. rev not found in rcache) fallback to
        # filectx logic.
        try:
            return repo[rev][fn].copysource()
        except error.LookupError:
            return None

    return getrenamed

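# Illustrative usage sketch (assumed file name): a log-like consumer asking
# where a file was renamed from at each revision.
#
#   getrenamed = getrenamedfn(repo)
#   for rev in repo.revs(b'all()'):
#       src = getrenamed(b'path/to/file', rev)
#       if src:
#           pass  # the file was copied/renamed from 'src' in this revision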

def getcopiesfn(repo, endrev=None):
    if copiesmod.usechangesetcentricalgo(repo):

        def copiesfn(ctx):
            if ctx.p2copies():
                allcopies = ctx.p1copies().copy()
                # There should be no overlap
                allcopies.update(ctx.p2copies())
                return sorted(allcopies.items())
            else:
                return sorted(ctx.p1copies().items())

    else:
        getrenamed = getrenamedfn(repo, endrev)

        def copiesfn(ctx):
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename))
            return copies

    return copiesfn


def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc:  # copying back a copy?
        if repo.dirstate[dst] not in b'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == b'a' and origsrc == src:
            if not ui.quiet:
                ui.warn(
                    _(
                        b"%s has not been committed yet, so no copy "
                        b"data will be stored for %s.\n"
                    )
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
                )
            if repo.dirstate[dst] in b'?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)


def movedirstate(repo, newctx, match=None):
    """Move the dirstate to newctx and adjust it as necessary.

    A matcher can be provided as an optimization. It is probably a bug to pass
    a matcher that doesn't match all the differences between the parent of the
    working copy and newctx.
    """
    oldctx = repo[b'.']
    ds = repo.dirstate
    copies = dict(ds.copies())
    ds.setparents(newctx.node(), repo.nullid)
    s = newctx.status(oldctx, match=match)
    for f in s.modified:
        if ds[f] == b'r':
            # modified + removed -> removed
            continue
        ds.normallookup(f)

    for f in s.added:
        if ds[f] == b'r':
            # added + removed -> unknown
            ds.drop(f)
        elif ds[f] != b'a':
            ds.add(f)

    for f in s.removed:
        if ds[f] == b'a':
            # removed + added -> normal
            ds.normallookup(f)
        elif ds[f] != b'r':
            ds.remove(f)

    # Merge old parent and old working dir copies
    oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
    oldcopies.update(copies)
    copies = {
        dst: oldcopies.get(src, src)
        for dst, src in pycompat.iteritems(oldcopies)
    }
    # Adjust the dirstate copies
    for dst, src in pycompat.iteritems(copies):
        if src not in newctx or dst in newctx or ds[dst] != b'a':
            src = None
        ds.copy(src, dst)
    repo._quick_access_changeid_invalidate()

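# Illustrative sketch (assumed node and locking discipline): a rewrite that
# keeps the working copy contents but reparents the dirstate might do
#
#   with repo.wlock(), repo.dirstate.parentchange():
#       movedirstate(repo, repo[newnode])
#
# where 'newnode' is the changeset the working copy should now be based on.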

def filterrequirements(requirements):
    """filters the requirements into two sets:

    wcreq: requirements which should be written in .hg/requires
    storereq: which should be written in .hg/store/requires

    Returns (wcreq, storereq)
    """
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
        wc, store = set(), set()
        for r in requirements:
            if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
                wc.add(r)
            else:
                store.add(r)
        return wc, store
    return requirements, None

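# Illustrative sketch (requirement names are assumed examples): when the
# share-safe requirement is present, only working-directory requirements
# stay in .hg/requires.
#
#   wc, store = filterrequirements({b'share-safe', b'revlogv1'})
#   # wc keeps e.g. b'share-safe'; store gets b'revlogv1'. Without
#   # share-safe the call returns (all_requirements, None).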

def istreemanifest(repo):
    """returns whether the repository is using treemanifest or not"""
    return requirementsmod.TREEMANIFEST_REQUIREMENT in repo.requirements


def writereporequirements(repo, requirements=None):
    """writes requirements for the repo

    Requirements are written to .hg/requires and .hg/store/requires based
    on whether share-safe mode is enabled and which requirements are wdir
    requirements and which are store requirements
    """
    if requirements:
        repo.requirements = requirements
    wcreq, storereq = filterrequirements(repo.requirements)
    if wcreq is not None:
        writerequires(repo.vfs, wcreq)
    if storereq is not None:
        writerequires(repo.svfs, storereq)
    elif repo.ui.configbool(b'format', b'usestore'):
        # only remove store requires if we are using store
        repo.svfs.tryunlink(b'requires')


def writerequires(opener, requirements):
    with opener(b'requires', b'w', atomictemp=True) as fp:
        for r in sorted(requirements):
            fp.write(b"%s\n" % r)


class filecachesubentry(object):
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

            # check again
            if not self._cacheable:
                return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise


class filecacheentry(object):
    def __init__(self, paths, stat=True):
        self._entries = []
        for path in paths:
            self._entries.append(filecachesubentry(path, stat))

    def changed(self):
        '''true if any entry has changed'''
        for entry in self._entries:
            if entry.changed():
                return True
        return False

    def refresh(self):
        for entry in self._entries:
            entry.refresh()


class filecache(object):
    """A property like decorator that tracks files under .hg/ for updates.

    On first access, the files defined as arguments are stat()ed and the
    results cached. The decorated function is called. The results are stashed
    away in a ``_filecache`` dict on the object whose method is decorated.

    On subsequent access, the cached result is used as it is set to the
    instance dictionary.

    On external property set/delete operations, the caller must update the
    corresponding _filecache entry appropriately. Use __class__.<attr>.set()
    instead of directly setting <attr>.

    When using the property API, the cached data is always used if available.
    No stat() is performed to check if the file has changed.

    Others can muck about with the state of the ``_filecache`` dict. e.g. they
    can populate an entry before the property's getter is called. In this case,
    entries in ``_filecache`` will be used during property operations,
    if available. If the underlying file changes, it is up to external callers
    to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
    method result as well as possibly calling ``del obj._filecache[attr]`` to
    remove the ``filecacheentry``.
    """

    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        self.func = func
        self.sname = func.__name__
        self.name = pycompat.sysbytes(self.sname)
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self

        assert self.sname not in obj.__dict__

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]
            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified the file between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.sname] = entry.obj
        return entry.obj

    # don't implement __set__(), which would make __dict__ lookup as slow as
    # function call.

    def set(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value  # update cached copy
        obj.__dict__[self.sname] = value  # update copy returned by obj.x

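# Illustrative sketch (hypothetical subclass and attribute): a repository-like
# object can use filecache by providing join() and a _filecache dict.
#
#   class repofilecache(filecache):
#       def join(self, obj, fname):
#           return obj.vfs.join(fname)
#
#   class fakerepo(object):
#       def __init__(self, vfs):
#           self.vfs = vfs
#           self._filecache = {}
#
#       @repofilecache(b'bookmarks')
#       def bookmarks(self):
#           return readbookmarks(self.vfs)  # recomputed when the file changes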

def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config(b"extdata", source)
    if not spec:
        raise error.Abort(_(b"unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith(b"shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(
                procutil.tonativestr(cmd),
                shell=True,
                bufsize=-1,
                close_fds=procutil.closefds,
                stdout=subprocess.PIPE,
                cwd=procutil.tonativestr(repo.root),
            )
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            if b" " in l:
                k, v = l.strip().split(b" ", 1)
            else:
                k, v = l.strip(), b""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError, error.InputError):
                pass  # we ignore data for nodes that don't exist locally
    finally:
        if proc:
            try:
                proc.communicate()
            except ValueError:
                # This happens if we started iterating src and then
                # get a parse error on a line. It should be safe to ignore.
                pass
        if src:
            src.close()
    if proc and proc.returncode != 0:
        raise error.Abort(
            _(b"extdata command '%s' failed: %s")
            % (cmd, procutil.explainexit(proc.returncode))
        )

    return data

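# Illustrative sketch (assumed source name and command): an hgrc entry such as
#
#   [extdata]
#   bugrefs = shell:cat .hg/bugrefs
#
# lets extdatasource(repo, b'bugrefs') return a {rev: value} map built from
# '<revspec> <value>' lines emitted by the command; records naming unknown
# revisions are silently skipped.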

class progress(object):
    def __init__(self, ui, updatebar, topic, unit=b"", total=None):
        self.ui = ui
        self.pos = 0
        self.topic = topic
        self.unit = unit
        self.total = total
        self.debug = ui.configbool(b'progress', b'debug')
        self._updatebar = updatebar

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.complete()

    def update(self, pos, item=b"", total=None):
        assert pos is not None
        if total:
            self.total = total
        self.pos = pos
        self._updatebar(self.topic, self.pos, item, self.unit, self.total)
        if self.debug:
            self._printdebug(item)

    def increment(self, step=1, item=b"", total=None):
        self.update(self.pos + step, item, total)

    def complete(self):
        self.pos = None
        self.unit = b""
        self.total = None
        self._updatebar(self.topic, self.pos, b"", self.unit, self.total)

    def _printdebug(self, item):
        unit = b''
        if self.unit:
            unit = b' ' + self.unit
        if item:
            item = b' ' + item

        if self.total:
            pct = 100.0 * self.pos / self.total
            self.ui.debug(
                b'%s:%s %d/%d%s (%4.2f%%)\n'
                % (self.topic, item, self.pos, self.total, unit, pct)
            )
        else:
            self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit))

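# Illustrative usage sketch: progress objects are normally obtained through
# ui.makeprogress(), which supplies the updatebar callback (topic and total
# below are assumed examples).
#
#   with ui.makeprogress(b'scanning', unit=b'files', total=len(files)) as p:
#       for f in files:
#           p.increment(item=f)
#   # leaving the block calls complete(), which clears the progress bar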

def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta"""
    # experimental config: format.generaldelta
    return ui.configbool(b'format', b'generaldelta') or ui.configbool(
        b'format', b'usegeneraldelta'
    )


def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised"""
    # experimental config: format.generaldelta
    return ui.configbool(b'format', b'generaldelta')


class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""

    firstlinekey = b'__firstline'

    def __init__(self, vfs, path, keys=None):
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of file should
        be treated as a key-value pair or returned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                e = _(b"empty simplekeyvalue file")
                raise error.CorruptedState(e)
            # we don't want to include '\n' in the __firstline
            d[self.firstlinekey] = lines[0][:-1]
            del lines[0]

        try:
            # the 'if line.strip()' part prevents us from failing on empty
            # lines which only contain '\n' therefore are not skipped
            # by 'if line'
            updatedict = dict(
                line[:-1].split(b'=', 1) for line in lines if line.strip()
            )
            if self.firstlinekey in updatedict:
                e = _(b"%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            d.update(updatedict)
        except ValueError as e:
            raise error.CorruptedState(stringutil.forcebytestr(e))
        return d

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file
        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to file before
        everything else, as it is, not in a key=value form"""
        lines = []
        if firstline is not None:
            lines.append(b'%s\n' % firstline)

        for k, v in data.items():
            if k == self.firstlinekey:
                e = b"key name '%s' is reserved" % self.firstlinekey
                raise error.ProgrammingError(e)
            if not k[0:1].isalpha():
                e = b"keys must start with a letter in a key-value file"
                raise error.ProgrammingError(e)
            if not k.isalnum():
                e = b"invalid key name in a simple key-value file"
                raise error.ProgrammingError(e)
            if b'\n' in v:
                e = b"invalid value in a simple key-value file"
                raise error.ProgrammingError(e)
            lines.append(b"%s=%s\n" % (k, v))
        with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp:
            fp.write(b''.join(lines))

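# Illustrative round trip (assumed vfs and file name):
#
#   skv = simplekeyvaluefile(repo.vfs, b'mystate')
#   skv.write({b'version': b'1'}, firstline=b'header')
#   state = skv.read(firstlinenonkeyval=True)
#   # state == {b'__firstline': b'header', b'version': b'1'}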

_reportobsoletedsource = [
    b'debugobsolete',
    b'pull',
    b'push',
    b'serve',
    b'unbundle',
]

_reportnewcssource = [
    b'pull',
    b'unbundle',
]


def prefetchfiles(repo, revmatches):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally, before the command
    uses them.

    Args:
      revmatches: a list of (revision, match) tuples to indicate the files to
      fetch at each revision. If any of the match elements is None, it matches
      all files.
    """

    def _matcher(m):
        if m:
            assert isinstance(m, matchmod.basematcher)
            # The command itself will complain about files that don't exist, so
            # don't duplicate the message.
            return matchmod.badmatch(m, lambda fn, msg: None)
        else:
            return matchall(repo)

    revbadmatches = [(rev, _matcher(match)) for (rev, match) in revmatches]

    fileprefetchhooks(repo, revbadmatches)

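# Illustrative sketch (assumed paths): a command about to read files from a
# revision can prefetch them in one batch first.
#
#   prefetchfiles(repo, [(rev, matchfiles(repo, [b'a.txt', b'b.txt']))])
#
# Extensions (e.g. lfs) register callbacks in fileprefetchhooks to make the
# file contents available locally before the command reads them.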

# a list of (repo, revs, match) prefetch functions
fileprefetchhooks = util.hooks()

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True


def registersummarycallback(repo, otr, txnname=b'', as_validator=False):
    """register a callback to issue a summary after the transaction is closed

    If as_validator is true, then the callbacks are registered as transaction
    validators instead
    """

    def txmatch(sources):
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used, leading to trouble. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())

        def wrapped(tr):
            repo = reporef()
            if filtername:
                assert repo is not None  # help pytype
                repo = repo.filtered(filtername)
            func(repo, tr)

        newcat = b'%02i-txnreport' % len(categories)
        if as_validator:
            otr.addvalidator(newcat, wrapped)
        else:
            otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    @reportsummary
    def reportchangegroup(repo, tr):
        cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
        cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
        cgfiles = tr.changes.get(b'changegroup-count-files', 0)
        cgheads = tr.changes.get(b'changegroup-count-heads', 0)
        if cgchangesets or cgrevisions or cgfiles:
            htext = b""
            if cgheads:
                htext = _(b" (%+d heads)") % cgheads
            msg = _(b"added %d changesets with %d changes to %d files%s\n")
            if as_validator:
                msg = _(b"adding %d changesets with %d changes to %d files%s\n")
            assert repo is not None  # help pytype
            repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))

    if txmatch(_reportobsoletedsource):

        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            newmarkers = len(tr.changes.get(b'obsmarkers', ()))
            if newmarkers:
                repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
            if obsoleted:
                msg = _(b'obsoleted %i changesets\n')
                if as_validator:
                    msg = _(b'obsoleting %i changesets\n')
                repo.ui.status(msg % len(obsoleted))

    if obsolete.isenabled(
        repo, obsolete.createmarkersopt
    ) and repo.ui.configbool(
        b'experimental', b'evolution.report-instabilities'
    ):
        instabilitytypes = [
            (b'orphan', b'orphan'),
            (b'phase-divergent', b'phasedivergent'),
            (b'content-divergent', b'contentdivergent'),
        ]

        def getinstabilitycounts(repo):
            filtered = repo.changelog.filteredrevs
            counts = {}
            for instability, revset in instabilitytypes:
                counts[instability] = len(
                    set(obsolete.getrevs(repo, revset)) - filtered
                )
            return counts

        oldinstabilitycounts = getinstabilitycounts(repo)

        @reportsummary
        def reportnewinstabilities(repo, tr):
            newinstabilitycounts = getinstabilitycounts(repo)
            for instability, revset in instabilitytypes:
                delta = (
                    newinstabilitycounts[instability]
                    - oldinstabilitycounts[instability]
                )
                msg = getinstabilitymessage(delta, instability)
                if msg:
                    repo.ui.warn(msg)

    if txmatch(_reportnewcssource):

        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            unfi = repo.unfiltered()
            if origrepolen >= len(unfi):
                return

            # Compute the bounds of new visible revisions' range.
            revs = smartset.spanset(repo, start=origrepolen)
            if revs:
                minrev, maxrev = repo[revs.min()], repo[revs.max()]

                if minrev == maxrev:
                    revrange = minrev
                else:
                    revrange = b'%s:%s' % (minrev, maxrev)
                draft = len(repo.revs(b'%ld and draft()', revs))
                secret = len(repo.revs(b'%ld and secret()', revs))
                if not (draft or secret):
                    msg = _(b'new changesets %s\n') % revrange
                elif draft and secret:
                    msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
                    msg %= (revrange, draft, secret)
                elif draft:
                    msg = _(b'new changesets %s (%d drafts)\n')
                    msg %= (revrange, draft)
                elif secret:
                    msg = _(b'new changesets %s (%d secrets)\n')
                    msg %= (revrange, secret)
                else:
                    errormsg = b'entered unreachable condition'
                    raise error.ProgrammingError(errormsg)
                repo.ui.status(msg)

            # search new changesets directly pulled as obsolete
            duplicates = tr.changes.get(b'revduplicates', ())
            obsadded = unfi.revs(
                b'(%d: + %ld) and obsolete()', origrepolen, duplicates
            )
            cl = repo.changelog
            extinctadded = [r for r in obsadded if r not in cl]
            if extinctadded:
                # They are not just obsolete, but obsolete and invisible
                # we call them "extinct" internally but the terms have not been
                # exposed to users.
                msg = b'(%d other changesets obsolete on arrival)\n'
                repo.ui.status(msg % len(extinctadded))

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            pull/unbundle.
            """
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            published = []
            for revs, (old, new) in tr.changes.get(b'phases', []):
                if new != phases.public:
                    continue
                published.extend(rev for rev in revs if rev < origrepolen)
            if not published:
                return
            msg = _(b'%d local changesets published\n')
            if as_validator:
                msg = _(b'%d local changesets will be published\n')
            repo.ui.status(msg % len(published))


def getinstabilitymessage(delta, instability):
    """function to return the message to show warning about new instabilities

    exists as a separate function so that extensions can wrap it to show more
    information, like how to fix instabilities"""
    if delta > 0:
        return _(b'%i new %s changesets\n') % (delta, instability)


def nodesummaries(repo, nodes, maxnumnodes=4):
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return b' '.join(short(h) for h in nodes)
    first = b' '.join(short(h) for h in nodes[:maxnumnodes])
    return _(b"%s and %d others") % (first, len(nodes) - maxnumnodes)


def enforcesinglehead(repo, tr, desc, accountclosed, filtername):
    """check that no named branch has multiple heads"""
    if desc in (b'strip', b'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered(filtername)
    # possible improvement: we could restrict the check to affected branch
    bm = visible.branchmap()
    for name in bm:
        heads = bm.branchheads(name, closed=accountclosed)
        if len(heads) > 1:
            msg = _(b'rejecting multiple heads on branch "%s"')
            msg %= name
            hint = _(b'%d heads: %s')
            hint %= (len(heads), nodesummaries(repo, heads))
            raise error.Abort(msg, hint=hint)


def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.
    """
    return sink


def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    if not repo.filtername or not repo.ui.configbool(
        b'experimental', b'directaccess'
    ):
        return repo

    if repo.filtername not in (b'visible', b'visible-hidden'):
        return repo

    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError:  # will be reported by scmutil.revrange()
            continue

        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)

    if not revs:
        return repo

    if hiddentype == b'warn':
        unfi = repo.unfiltered()
        revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in revs])
        repo.ui.warn(
            _(
                b"warning: accessing hidden changesets for write "
                b"operation: %s\n"
            )
            % revstr
        )

    # we have to use new filtername to separate branch/tags cache until we can
    # disable these caches when revisions are dynamically pinned.
    return repo.filtered(b'visible-hidden', revs)


def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and return a set of revision numbers of hidden
    changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
    for s in symbols:
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    if n not in cl:
                        revs.add(n)
                    continue
        except ValueError:
            pass

        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs


def bookmarkrevs(repo, mark):
    """Select revisions reachable by a given bookmark

    If the bookmarked revision isn't a head, an empty set will be returned.
    """
    return repo.revs(format_bookmark_revspec(mark))


def format_bookmark_revspec(mark):
    """Build a revset expression to select revisions reachable by a given
    bookmark"""
    mark = b'literal:' + mark
    return revsetlang.formatspec(
        b"ancestors(bookmark(%s)) - "
        b"ancestors(head() and not bookmark(%s)) - "
        b"ancestors(bookmark() and not bookmark(%s))",
        mark,
        mark,
        mark,
    )
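
# For reference: for a bookmark named b'feature', format_bookmark_revspec()
# builds a revset equivalent to
#
#   ancestors(bookmark("feature"))
#     - ancestors(head() and not bookmark("feature"))
#     - ancestors(bookmark() and not bookmark("feature"))
#
# i.e. the changesets reachable only through that bookmark, which is what
# bookmarkrevs() evaluates via repo.revs().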