errors: create "similarity hint" for UnknownIdentifier eagerly in constructor...
Martin von Zweigbergk
r46495:1817b668 default
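This changeset moves the "did you mean ...?" hint computation out of the printing site in scmutil.formatparse() and into the UnknownIdentifier constructor, so the hint travels with the exception itself. A minimal usage sketch, assuming a Mercurial build that includes this changeset is importable; the misspelled identifier and the candidate symbols are made up for illustration:

    from mercurial import error

    try:
        raise error.UnknownIdentifier(b'brnch', [b'branch', b'bookmark'])
    except error.ParseError as inst:
        # The hint is attached eagerly in the constructor, so a generic
        # ParseError handler can print it without special-casing.
        print(inst.message)  # b'unknown identifier: brnch'
        print(inst.hint)     # b'did you mean branch?'

Any code that already prints ParseError hints generically picks this up for free, which lets the scmutil.py hunk further below drop its special case.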
@@ -1,540 +1,543 b''
1 1 # error.py - Mercurial exceptions
2 2 #
3 3 # Copyright 2005-2008 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Mercurial exceptions.
9 9
10 10 This allows us to catch exceptions at higher levels without forcing
11 11 imports.
12 12 """
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import difflib
17 17
18 18 # Do not import anything but pycompat here, please
19 19 from . import pycompat
20 20
21 21
22 22 def _tobytes(exc):
23 23 """Byte-stringify exception in the same way as BaseException.__str__()"""
24 24 if not exc.args:
25 25 return b''
26 26 if len(exc.args) == 1:
27 27 return pycompat.bytestr(exc.args[0])
28 28 return b'(%s)' % b', '.join(b"'%s'" % pycompat.bytestr(a) for a in exc.args)
29 29
30 30
31 31 class Hint(object):
32 32 """Mix-in to provide a hint of an error
33 33
34 34 This should come first in the inheritance list to consume a hint and
35 35 pass remaining arguments to the exception class.
36 36 """
37 37
38 38 def __init__(self, *args, **kw):
39 39 self.hint = kw.pop('hint', None)
40 40 super(Hint, self).__init__(*args, **kw)
41 41
42 42
43 43 class StorageError(Hint, Exception):
44 44 """Raised when an error occurs in a storage layer.
45 45
46 46 Usually subclassed by a storage-specific exception.
47 47 """
48 48
49 49 __bytes__ = _tobytes
50 50
51 51
52 52 class RevlogError(StorageError):
53 53 pass
54 54
55 55
56 56 class SidedataHashError(RevlogError):
57 57 def __init__(self, key, expected, got):
58 58 self.sidedatakey = key
59 59 self.expecteddigest = expected
60 60 self.actualdigest = got
61 61
62 62
63 63 class FilteredIndexError(IndexError):
64 64 __bytes__ = _tobytes
65 65
66 66
67 67 class LookupError(RevlogError, KeyError):
68 68 def __init__(self, name, index, message):
69 69 self.name = name
70 70 self.index = index
71 71 # this can't be called 'message' because at least some installs of
72 72 # Python 2.6+ complain about the 'message' property being deprecated
73 73 self.lookupmessage = message
74 74 if isinstance(name, bytes) and len(name) == 20:
75 75 from .node import short
76 76
77 77 name = short(name)
78 78 # if name is a binary node, it can be None
79 79 RevlogError.__init__(
80 80 self, b'%s@%s: %s' % (index, pycompat.bytestr(name), message)
81 81 )
82 82
83 83 def __bytes__(self):
84 84 return RevlogError.__bytes__(self)
85 85
86 86 def __str__(self):
87 87 return RevlogError.__str__(self)
88 88
89 89
90 90 class AmbiguousPrefixLookupError(LookupError):
91 91 pass
92 92
93 93
94 94 class FilteredLookupError(LookupError):
95 95 pass
96 96
97 97
98 98 class ManifestLookupError(LookupError):
99 99 pass
100 100
101 101
102 102 class CommandError(Exception):
103 103 """Exception raised on errors in parsing the command line."""
104 104
105 105 def __init__(self, command, message):
106 106 self.command = command
107 107 self.message = message
108 108 super(CommandError, self).__init__()
109 109
110 110 __bytes__ = _tobytes
111 111
112 112
113 113 class UnknownCommand(Exception):
114 114 """Exception raised if command is not in the command table."""
115 115
116 116 def __init__(self, command, all_commands=None):
117 117 self.command = command
118 118 self.all_commands = all_commands
119 119 super(UnknownCommand, self).__init__()
120 120
121 121 __bytes__ = _tobytes
122 122
123 123
124 124 class AmbiguousCommand(Exception):
125 125 """Exception raised if command shortcut matches more than one command."""
126 126
127 127 def __init__(self, prefix, matches):
128 128 self.prefix = prefix
129 129 self.matches = matches
130 130 super(AmbiguousCommand, self).__init__()
131 131
132 132 __bytes__ = _tobytes
133 133
134 134
135 135 class WorkerError(Exception):
136 136 """Exception raised when a worker process dies."""
137 137
138 138 def __init__(self, status_code):
139 139 self.status_code = status_code
140 140
141 141
142 142 class InterventionRequired(Hint, Exception):
143 143 """Exception raised when a command requires human intervention."""
144 144
145 145 __bytes__ = _tobytes
146 146
147 147
148 148 class ConflictResolutionRequired(InterventionRequired):
149 149 """Exception raised when a continuable command requires merge conflict resolution."""
150 150
151 151 def __init__(self, opname):
152 152 from .i18n import _
153 153
154 154 self.opname = opname
155 155 InterventionRequired.__init__(
156 156 self,
157 157 _(
158 158 b"unresolved conflicts (see 'hg resolve', then 'hg %s --continue')"
159 159 )
160 160 % opname,
161 161 )
162 162
163 163
164 164 class Abort(Hint, Exception):
165 165 """Raised if a command needs to print an error and exit."""
166 166
167 167 def __init__(self, message, hint=None):
168 168 self.message = message
169 169 self.hint = hint
170 170 # Pass the message into the Exception constructor to help extensions
171 171 # that look for exc.args[0].
172 172 Exception.__init__(self, message)
173 173
174 174 def __bytes__(self):
175 175 return self.message
176 176
177 177 if pycompat.ispy3:
178 178
179 179 def __str__(self):
180 180 # the output would be unreadable if the message was translated,
181 181 # but do not replace it with encoding.strfromlocal(), which
182 182 # may raise another exception.
183 183 return pycompat.sysstr(self.__bytes__())
184 184
185 185
186 186 class InputError(Abort):
187 187 """Indicates that the user made an error in their input.
188 188
189 189 Examples: Invalid command, invalid flags, invalid revision.
190 190 """
191 191
192 192
193 193 class StateError(Abort):
194 194 """Indicates that the operation might work if retried in a different state.
195 195
196 196 Examples: Unresolved merge conflicts, unfinished operations.
197 197 """
198 198
199 199
200 200 class CanceledError(Abort):
201 201 """Indicates that the user canceled the operation.
202 202
203 203 Examples: Close commit editor with error status, quit chistedit.
204 204 """
205 205
206 206
207 207 class HookLoadError(Abort):
208 208 """raised when loading a hook fails, aborting an operation
209 209
210 210 Exists to allow more specialized catching."""
211 211
212 212
213 213 class HookAbort(Abort):
214 214 """raised when a validation hook fails, aborting an operation
215 215
216 216 Exists to allow more specialized catching."""
217 217
218 218
219 219 class ConfigError(Abort):
220 220 """Exception raised when parsing config files"""
221 221
222 222
223 223 class UpdateAbort(Abort):
224 224 """Raised when an update is aborted for destination issues"""
225 225
226 226
227 227 class MergeDestAbort(Abort):
228 228 """Raised when an update is aborted for destination issues"""
229 229
230 230
231 231 class NoMergeDestAbort(MergeDestAbort):
232 232 """Raised when an update is aborted because there is nothing to merge"""
233 233
234 234
235 235 class ManyMergeDestAbort(MergeDestAbort):
236 236 """Raised when an update is aborted because destination is ambiguous"""
237 237
238 238
239 239 class ResponseExpected(Abort):
240 240 """Raised when an EOF is received for a prompt"""
241 241
242 242 def __init__(self):
243 243 from .i18n import _
244 244
245 245 Abort.__init__(self, _(b'response expected'))
246 246
247 247
248 248 class OutOfBandError(Hint, Exception):
249 249 """Exception raised when a remote repo reports failure"""
250 250
251 251 __bytes__ = _tobytes
252 252
253 253
254 254 class ParseError(Hint, Exception):
255 255 """Raised when parsing config files and {rev,file}sets (msg[, pos])"""
256 256
257 257 def __init__(self, message, location=None, hint=None):
258 258 self.message = message
259 259 self.location = location
260 260 self.hint = hint
261 261 # Pass the message and possibly location into the Exception constructor
262 262 # to help code that looks for exc.args.
263 263 if location is not None:
264 264 Exception.__init__(self, message, location)
265 265 else:
266 266 Exception.__init__(self, message)
267 267
268 268 __bytes__ = _tobytes
269 269
270 270
271 271 class PatchError(Exception):
272 272 __bytes__ = _tobytes
273 273
274 274
275 275 def getsimilar(symbols, value):
276 276 sim = lambda x: difflib.SequenceMatcher(None, value, x).ratio()
277 277 # The cutoff for similarity here is pretty arbitrary. It should
278 278 # probably be investigated and tweaked.
279 279 return [s for s in symbols if sim(s) > 0.6]
280 280
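# For a sense of the cutoff: difflib's ratio() is 2*M/T, where M counts
# matching characters and T is the combined length of both strings. For a
# hypothetical typo b'brnch' against b'branch', M = 5 ('br' + 'nch') and
# T = 5 + 6 = 11, giving 10/11 ~= 0.91, well above 0.6:
#
#     >>> difflib.SequenceMatcher(None, b'brnch', b'branch').ratio()
#     0.9090909090909091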
281 281
282 282 def similarity_hint(similar):
283 283 from .i18n import _
284 284
285 285 if len(similar) == 1:
286 286 return _(b"did you mean %s?") % similar[0]
287 287 elif similar:
288 288 ss = b", ".join(sorted(similar))
289 289 return _(b"did you mean one of %s?") % ss
290 290 else:
291 291 return None
292 292
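# For reference, similarity_hint() maps candidate lists to hints as
# follows (inputs hypothetical; multiple matches are sorted):
#   [b'branch']              -> b'did you mean branch?'
#   [b'branch', b'bookmark'] -> b'did you mean one of bookmark, branch?'
#   []                       -> None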
293 293
294 294 class UnknownIdentifier(ParseError):
295 295 """Exception raised when a {rev,file}set references an unknown identifier"""
296 296
297 297 def __init__(self, function, symbols):
298 298 from .i18n import _
299 299
300 ParseError.__init__(self, _(b"unknown identifier: %s") % function)
301 self.function = function
302 self.symbols = symbols
300 similar = getsimilar(symbols, function)
301 hint = similarity_hint(similar)
302
303 ParseError.__init__(
304 self, _(b"unknown identifier: %s") % function, hint=hint
305 )
303 306
304 307
305 308 class RepoError(Hint, Exception):
306 309 __bytes__ = _tobytes
307 310
308 311
309 312 class RepoLookupError(RepoError):
310 313 pass
311 314
312 315
313 316 class FilteredRepoLookupError(RepoLookupError):
314 317 pass
315 318
316 319
317 320 class CapabilityError(RepoError):
318 321 pass
319 322
320 323
321 324 class RequirementError(RepoError):
322 325 """Exception raised if .hg/requires has an unknown entry."""
323 326
324 327
325 328 class StdioError(IOError):
326 329 """Raised if I/O to stdout or stderr fails"""
327 330
328 331 def __init__(self, err):
329 332 IOError.__init__(self, err.errno, err.strerror)
330 333
331 334 # no __bytes__() because error message is derived from the standard IOError
332 335
333 336
334 337 class UnsupportedMergeRecords(Abort):
335 338 def __init__(self, recordtypes):
336 339 from .i18n import _
337 340
338 341 self.recordtypes = sorted(recordtypes)
339 342 s = b' '.join(self.recordtypes)
340 343 Abort.__init__(
341 344 self,
342 345 _(b'unsupported merge state records: %s') % s,
343 346 hint=_(
344 347 b'see https://mercurial-scm.org/wiki/MergeStateRecords for '
345 348 b'more information'
346 349 ),
347 350 )
348 351
349 352
350 353 class UnknownVersion(Abort):
351 354 """generic exception for aborting from an encounter with an unknown version
352 355 """
353 356
354 357 def __init__(self, msg, hint=None, version=None):
355 358 self.version = version
356 359 super(UnknownVersion, self).__init__(msg, hint=hint)
357 360
358 361
359 362 class LockError(IOError):
360 363 def __init__(self, errno, strerror, filename, desc):
361 364 IOError.__init__(self, errno, strerror, filename)
362 365 self.desc = desc
363 366
364 367 # no __bytes__() because error message is derived from the standard IOError
365 368
366 369
367 370 class LockHeld(LockError):
368 371 def __init__(self, errno, filename, desc, locker):
369 372 LockError.__init__(self, errno, b'Lock held', filename, desc)
370 373 self.locker = locker
371 374
372 375
373 376 class LockUnavailable(LockError):
374 377 pass
375 378
376 379
377 380 # LockError is for errors while acquiring the lock -- this is unrelated
378 381 class LockInheritanceContractViolation(RuntimeError):
379 382 __bytes__ = _tobytes
380 383
381 384
382 385 class ResponseError(Exception):
383 386 """Raised to print an error with part of output and exit."""
384 387
385 388 __bytes__ = _tobytes
386 389
387 390
388 391 # derived from KeyboardInterrupt to simplify some breakout code
389 392 class SignalInterrupt(KeyboardInterrupt):
390 393 """Exception raised on SIGTERM and SIGHUP."""
391 394
392 395
393 396 class SignatureError(Exception):
394 397 __bytes__ = _tobytes
395 398
396 399
397 400 class PushRaced(RuntimeError):
398 401 """An exception raised during unbundling that indicates a push race"""
399 402
400 403 __bytes__ = _tobytes
401 404
402 405
403 406 class ProgrammingError(Hint, RuntimeError):
404 407 """Raised if a mercurial (core or extension) developer made a mistake"""
405 408
406 409 def __init__(self, msg, *args, **kwargs):
407 410 # On Python 3, turn the message back into a string since this is
408 411 # an internal-only error that won't be printed except in a
409 412 # stack trace.
410 413 msg = pycompat.sysstr(msg)
411 414 super(ProgrammingError, self).__init__(msg, *args, **kwargs)
412 415
413 416 __bytes__ = _tobytes
414 417
415 418
416 419 class WdirUnsupported(Exception):
417 420 """An exception which is raised when 'wdir()' is not supported"""
418 421
419 422 __bytes__ = _tobytes
420 423
421 424
422 425 # bundle2 related errors
423 426 class BundleValueError(ValueError):
424 427 """error raised when bundle2 cannot be processed"""
425 428
426 429 __bytes__ = _tobytes
427 430
428 431
429 432 class BundleUnknownFeatureError(BundleValueError):
430 433 def __init__(self, parttype=None, params=(), values=()):
431 434 self.parttype = parttype
432 435 self.params = params
433 436 self.values = values
434 437 if self.parttype is None:
435 438 msg = b'Stream Parameter'
436 439 else:
437 440 msg = parttype
438 441 entries = self.params
439 442 if self.params and self.values:
440 443 assert len(self.params) == len(self.values)
441 444 entries = []
442 445 for idx, par in enumerate(self.params):
443 446 val = self.values[idx]
444 447 if val is None:
445 448 entries.append(par)
446 449 else:
447 450 entries.append(b"%s=%r" % (par, pycompat.maybebytestr(val)))
448 451 if entries:
449 452 msg = b'%s - %s' % (msg, b', '.join(entries))
450 453 ValueError.__init__(self, msg)
451 454
452 455
453 456 class ReadOnlyPartError(RuntimeError):
454 457 """error raised when code tries to alter a part being generated"""
455 458
456 459 __bytes__ = _tobytes
457 460
458 461
459 462 class PushkeyFailed(Abort):
460 463 """error raised when a pushkey part failed to update a value"""
461 464
462 465 def __init__(
463 466 self, partid, namespace=None, key=None, new=None, old=None, ret=None
464 467 ):
465 468 self.partid = partid
466 469 self.namespace = namespace
467 470 self.key = key
468 471 self.new = new
469 472 self.old = old
470 473 self.ret = ret
471 474 # no i18n expected to be processed into a better message
472 475 Abort.__init__(
473 476 self, b'failed to update value for "%s/%s"' % (namespace, key)
474 477 )
475 478
476 479
477 480 class CensoredNodeError(StorageError):
478 481 """error raised when content verification fails on a censored node
479 482
480 483 Also contains the tombstone data substituted for the uncensored data.
481 484 """
482 485
483 486 def __init__(self, filename, node, tombstone):
484 487 from .node import short
485 488
486 489 StorageError.__init__(self, b'%s:%s' % (filename, short(node)))
487 490 self.tombstone = tombstone
488 491
489 492
490 493 class CensoredBaseError(StorageError):
491 494 """error raised when a delta is rejected because its base is censored
492 495
493 496 A delta based on a censored revision must be formed as single patch
494 497 operation which replaces the entire base with new content. This ensures
495 498 the delta may be applied by clones which have not censored the base.
496 499 """
497 500
498 501
499 502 class InvalidBundleSpecification(Exception):
500 503 """error raised when a bundle specification is invalid.
501 504
502 505 This is used for syntax errors as opposed to support errors.
503 506 """
504 507
505 508 __bytes__ = _tobytes
506 509
507 510
508 511 class UnsupportedBundleSpecification(Exception):
509 512 """error raised when a bundle specification is not supported."""
510 513
511 514 __bytes__ = _tobytes
512 515
513 516
514 517 class CorruptedState(Exception):
515 518 """error raised when a command is not able to read its state from file"""
516 519
517 520 __bytes__ = _tobytes
518 521
519 522
520 523 class PeerTransportError(Abort):
521 524 """Transport-level I/O error when communicating with a peer repo."""
522 525
523 526
524 527 class InMemoryMergeConflictsError(Exception):
525 528 """Exception raised when merge conflicts arose during an in-memory merge."""
526 529
527 530 __bytes__ = _tobytes
528 531
529 532
530 533 class WireprotoCommandError(Exception):
531 534 """Represents an error during execution of a wire protocol command.
532 535
533 536 Should only be thrown by wire protocol version 2 commands.
534 537
535 538 The error is a formatter string and an optional iterable of arguments.
536 539 """
537 540
538 541 def __init__(self, message, args=None):
539 542 self.message = message
540 543 self.messageargs = args
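With the hint carried on the exception, the scmutil.py hunk below drops the UnknownIdentifier special case from formatparse(). A condensed sketch of the resulting caller, leaving out the i18n and pycompat.bytestr plumbing the real function keeps:

    def formatparse(write, inst):
        if inst.location is not None:
            write(b"hg: parse error at %s: %s\n" % (inst.location, inst.message))
        else:
            write(b"hg: parse error: %s\n" % inst.message)
        # UnknownIdentifier now arrives with inst.hint already populated.
        if inst.hint:
            write(b"(%s)\n" % inst.hint)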
@@ -1,2330 +1,2324 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import os
13 13 import posixpath
14 14 import re
15 15 import subprocess
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 bin,
21 21 hex,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 wdirid,
26 26 wdirrev,
27 27 )
28 28 from .pycompat import getattr
29 29 from .thirdparty import attr
30 30 from . import (
31 31 copies as copiesmod,
32 32 encoding,
33 33 error,
34 34 match as matchmod,
35 35 obsolete,
36 36 obsutil,
37 37 pathutil,
38 38 phases,
39 39 policy,
40 40 pycompat,
41 41 requirements as requirementsmod,
42 42 revsetlang,
43 43 similar,
44 44 smartset,
45 45 url,
46 46 util,
47 47 vfs,
48 48 )
49 49
50 50 from .utils import (
51 51 hashutil,
52 52 procutil,
53 53 stringutil,
54 54 )
55 55
56 56 if pycompat.iswindows:
57 57 from . import scmwindows as scmplatform
58 58 else:
59 59 from . import scmposix as scmplatform
60 60
61 61 parsers = policy.importmod('parsers')
62 62 rustrevlog = policy.importrust('revlog')
63 63
64 64 termsize = scmplatform.termsize
65 65
66 66
67 67 @attr.s(slots=True, repr=False)
68 68 class status(object):
69 69 '''Struct with a list of files per status.
70 70
71 71 The 'deleted', 'unknown' and 'ignored' properties are only
72 72 relevant to the working copy.
73 73 '''
74 74
75 75 modified = attr.ib(default=attr.Factory(list))
76 76 added = attr.ib(default=attr.Factory(list))
77 77 removed = attr.ib(default=attr.Factory(list))
78 78 deleted = attr.ib(default=attr.Factory(list))
79 79 unknown = attr.ib(default=attr.Factory(list))
80 80 ignored = attr.ib(default=attr.Factory(list))
81 81 clean = attr.ib(default=attr.Factory(list))
82 82
83 83 def __iter__(self):
84 84 yield self.modified
85 85 yield self.added
86 86 yield self.removed
87 87 yield self.deleted
88 88 yield self.unknown
89 89 yield self.ignored
90 90 yield self.clean
91 91
92 92 def __repr__(self):
93 93 return (
94 94 r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
95 95 r'unknown=%s, ignored=%s, clean=%s>'
96 96 ) % tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)
97 97
98 98
99 99 def itersubrepos(ctx1, ctx2):
100 100 """find subrepos in ctx1 or ctx2"""
101 101 # Create a (subpath, ctx) mapping where we prefer subpaths from
102 102 # ctx1. The subpaths from ctx2 are important when the .hgsub file
103 103 # has been modified (in ctx2) but not yet committed (in ctx1).
104 104 subpaths = dict.fromkeys(ctx2.substate, ctx2)
105 105 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
106 106
107 107 missing = set()
108 108
109 109 for subpath in ctx2.substate:
110 110 if subpath not in ctx1.substate:
111 111 del subpaths[subpath]
112 112 missing.add(subpath)
113 113
114 114 for subpath, ctx in sorted(pycompat.iteritems(subpaths)):
115 115 yield subpath, ctx.sub(subpath)
116 116
117 117 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
118 118 # status and diff will have an accurate result when it does
119 119 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
120 120 # against itself.
121 121 for subpath in missing:
122 122 yield subpath, ctx2.nullsub(subpath, ctx1)
123 123
124 124
125 125 def nochangesfound(ui, repo, excluded=None):
126 126 '''Report no changes for push/pull, excluded is None or a list of
127 127 nodes excluded from the push/pull.
128 128 '''
129 129 secretlist = []
130 130 if excluded:
131 131 for n in excluded:
132 132 ctx = repo[n]
133 133 if ctx.phase() >= phases.secret and not ctx.extinct():
134 134 secretlist.append(n)
135 135
136 136 if secretlist:
137 137 ui.status(
138 138 _(b"no changes found (ignored %d secret changesets)\n")
139 139 % len(secretlist)
140 140 )
141 141 else:
142 142 ui.status(_(b"no changes found\n"))
143 143
144 144
145 145 def formatparse(write, inst):
146 146 if inst.location is not None:
147 147 write(
148 148 _(b"hg: parse error at %s: %s\n")
149 149 % (pycompat.bytestr(inst.location), inst.message)
150 150 )
151 151 else:
152 152 write(_(b"hg: parse error: %s\n") % inst.message)
153 if isinstance(inst, error.UnknownIdentifier):
154 # make sure to check fileset first, as revset can invoke fileset
155 similar = error.getsimilar(inst.symbols, inst.function)
156 hint = error.similarity_hint(similar)
157 if hint:
158 write(b"(%s)\n" % hint)
159 elif inst.hint:
153 if inst.hint:
160 154 write(_(b"(%s)\n") % inst.hint)
161 155
162 156
163 157 def callcatch(ui, func):
164 158 """call func() with global exception handling
165 159
166 160 return func() if no exception happens. otherwise do some error handling
167 161 and return an exit code accordingly. does not handle all exceptions.
168 162 """
169 163 coarse_exit_code = -1
170 164 detailed_exit_code = -1
171 165 try:
172 166 try:
173 167 return func()
174 168 except: # re-raises
175 169 ui.traceback()
176 170 raise
177 171 # Global exception handling, alphabetically
178 172 # Mercurial-specific first, followed by built-in and library exceptions
179 173 except error.LockHeld as inst:
180 174 detailed_exit_code = 20
181 175 if inst.errno == errno.ETIMEDOUT:
182 176 reason = _(b'timed out waiting for lock held by %r') % (
183 177 pycompat.bytestr(inst.locker)
184 178 )
185 179 else:
186 180 reason = _(b'lock held by %r') % inst.locker
187 181 ui.error(
188 182 _(b"abort: %s: %s\n")
189 183 % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
190 184 )
191 185 if not inst.locker:
192 186 ui.error(_(b"(lock might be very busy)\n"))
193 187 except error.LockUnavailable as inst:
194 188 detailed_exit_code = 20
195 189 ui.error(
196 190 _(b"abort: could not lock %s: %s\n")
197 191 % (
198 192 inst.desc or stringutil.forcebytestr(inst.filename),
199 193 encoding.strtolocal(inst.strerror),
200 194 )
201 195 )
202 196 except error.OutOfBandError as inst:
203 197 detailed_exit_code = 100
204 198 if inst.args:
205 199 msg = _(b"abort: remote error:\n")
206 200 else:
207 201 msg = _(b"abort: remote error\n")
208 202 ui.error(msg)
209 203 if inst.args:
210 204 ui.error(b''.join(inst.args))
211 205 if inst.hint:
212 206 ui.error(b'(%s)\n' % inst.hint)
213 207 except error.RepoError as inst:
214 208 ui.error(_(b"abort: %s!\n") % inst)
215 209 if inst.hint:
216 210 ui.error(_(b"(%s)\n") % inst.hint)
217 211 except error.ResponseError as inst:
218 212 ui.error(_(b"abort: %s") % inst.args[0])
219 213 msg = inst.args[1]
220 214 if isinstance(msg, type(u'')):
221 215 msg = pycompat.sysbytes(msg)
222 216 if not isinstance(msg, bytes):
223 217 ui.error(b" %r\n" % (msg,))
224 218 elif not msg:
225 219 ui.error(_(b" empty string\n"))
226 220 else:
227 221 ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
228 222 except error.CensoredNodeError as inst:
229 223 ui.error(_(b"abort: file censored %s!\n") % inst)
230 224 except error.StorageError as inst:
231 225 ui.error(_(b"abort: %s!\n") % inst)
232 226 if inst.hint:
233 227 ui.error(_(b"(%s)\n") % inst.hint)
234 228 except error.InterventionRequired as inst:
235 229 ui.error(b"%s\n" % inst)
236 230 if inst.hint:
237 231 ui.error(_(b"(%s)\n") % inst.hint)
238 232 detailed_exit_code = 240
239 233 coarse_exit_code = 1
240 234 except error.WdirUnsupported:
241 235 ui.error(_(b"abort: working directory revision cannot be specified\n"))
242 236 except error.Abort as inst:
243 237 if isinstance(inst, error.InputError):
244 238 detailed_exit_code = 10
245 239 elif isinstance(inst, error.StateError):
246 240 detailed_exit_code = 20
247 241 elif isinstance(inst, error.ConfigError):
248 242 detailed_exit_code = 30
249 243 elif isinstance(inst, error.CanceledError):
250 244 detailed_exit_code = 250
251 245 ui.error(_(b"abort: %s\n") % inst.message)
252 246 if inst.hint:
253 247 ui.error(_(b"(%s)\n") % inst.hint)
254 248 except error.WorkerError as inst:
255 249 # Don't print a message -- the worker already should have
256 250 return inst.status_code
257 251 except ImportError as inst:
258 252 ui.error(_(b"abort: %s!\n") % stringutil.forcebytestr(inst))
259 253 m = stringutil.forcebytestr(inst).split()[-1]
260 254 if m in b"mpatch bdiff".split():
261 255 ui.error(_(b"(did you forget to compile extensions?)\n"))
262 256 elif m in b"zlib".split():
263 257 ui.error(_(b"(is your Python install correct?)\n"))
264 258 except util.urlerr.httperror as inst:
265 259 detailed_exit_code = 100
266 260 ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
267 261 except util.urlerr.urlerror as inst:
268 262 detailed_exit_code = 100
269 263 try: # usually it is in the form (errno, strerror)
270 264 reason = inst.reason.args[1]
271 265 except (AttributeError, IndexError):
272 266 # it might be anything, for example a string
273 267 reason = inst.reason
274 268 if isinstance(reason, pycompat.unicode):
275 269 # SSLError of Python 2.7.9 contains a unicode
276 270 reason = encoding.unitolocal(reason)
277 271 ui.error(_(b"abort: error: %s\n") % stringutil.forcebytestr(reason))
278 272 except (IOError, OSError) as inst:
279 273 if (
280 274 util.safehasattr(inst, b"args")
281 275 and inst.args
282 276 and inst.args[0] == errno.EPIPE
283 277 ):
284 278 pass
285 279 elif getattr(inst, "strerror", None): # common IOError or OSError
286 280 if getattr(inst, "filename", None) is not None:
287 281 ui.error(
288 282 _(b"abort: %s: '%s'\n")
289 283 % (
290 284 encoding.strtolocal(inst.strerror),
291 285 stringutil.forcebytestr(inst.filename),
292 286 )
293 287 )
294 288 else:
295 289 ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
296 290 else: # suspicious IOError
297 291 raise
298 292 except MemoryError:
299 293 ui.error(_(b"abort: out of memory\n"))
300 294 except SystemExit as inst:
301 295 # Commands shouldn't sys.exit directly, but give a return code.
302 296 # Just in case catch this and pass exit code to caller.
303 297 detailed_exit_code = 254
304 298 coarse_exit_code = inst.code
305 299
306 300 if ui.configbool(b'ui', b'detailed-exit-code'):
307 301 return detailed_exit_code
308 302 else:
309 303 return coarse_exit_code
310 304
311 305
312 306 def checknewlabel(repo, lbl, kind):
313 307 # Do not use the "kind" parameter in ui output.
314 308 # It makes strings difficult to translate.
315 309 if lbl in [b'tip', b'.', b'null']:
316 310 raise error.InputError(_(b"the name '%s' is reserved") % lbl)
317 311 for c in (b':', b'\0', b'\n', b'\r'):
318 312 if c in lbl:
319 313 raise error.InputError(
320 314 _(b"%r cannot be used in a name") % pycompat.bytestr(c)
321 315 )
322 316 try:
323 317 int(lbl)
324 318 raise error.InputError(_(b"cannot use an integer as a name"))
325 319 except ValueError:
326 320 pass
327 321 if lbl.strip() != lbl:
328 322 raise error.InputError(
329 323 _(b"leading or trailing whitespace in name %r") % lbl
330 324 )
331 325
332 326
333 327 def checkfilename(f):
334 328 '''Check that the filename f is an acceptable filename for a tracked file'''
335 329 if b'\r' in f or b'\n' in f:
336 330 raise error.InputError(
337 331 _(b"'\\n' and '\\r' disallowed in filenames: %r")
338 332 % pycompat.bytestr(f)
339 333 )
340 334
341 335
342 336 def checkportable(ui, f):
343 337 '''Check if filename f is portable and warn or abort depending on config'''
344 338 checkfilename(f)
345 339 abort, warn = checkportabilityalert(ui)
346 340 if abort or warn:
347 341 msg = util.checkwinfilename(f)
348 342 if msg:
349 343 msg = b"%s: %s" % (msg, procutil.shellquote(f))
350 344 if abort:
351 345 raise error.InputError(msg)
352 346 ui.warn(_(b"warning: %s\n") % msg)
353 347
354 348
355 349 def checkportabilityalert(ui):
356 350 '''check if the user's config requests nothing, a warning, or abort for
357 351 non-portable filenames'''
358 352 val = ui.config(b'ui', b'portablefilenames')
359 353 lval = val.lower()
360 354 bval = stringutil.parsebool(val)
361 355 abort = pycompat.iswindows or lval == b'abort'
362 356 warn = bval or lval == b'warn'
363 357 if bval is None and not (warn or abort or lval == b'ignore'):
364 358 raise error.ConfigError(
365 359 _(b"ui.portablefilenames value is invalid ('%s')") % val
366 360 )
367 361 return abort, warn
368 362
369 363
370 364 class casecollisionauditor(object):
371 365 def __init__(self, ui, abort, dirstate):
372 366 self._ui = ui
373 367 self._abort = abort
374 368 allfiles = b'\0'.join(dirstate)
375 369 self._loweredfiles = set(encoding.lower(allfiles).split(b'\0'))
376 370 self._dirstate = dirstate
377 371 # The purpose of _newfiles is so that we don't complain about
378 372 # case collisions if someone were to call this object with the
379 373 # same filename twice.
380 374 self._newfiles = set()
381 375
382 376 def __call__(self, f):
383 377 if f in self._newfiles:
384 378 return
385 379 fl = encoding.lower(f)
386 380 if fl in self._loweredfiles and f not in self._dirstate:
387 381 msg = _(b'possible case-folding collision for %s') % f
388 382 if self._abort:
389 383 raise error.Abort(msg)
390 384 self._ui.warn(_(b"warning: %s\n") % msg)
391 385 self._loweredfiles.add(fl)
392 386 self._newfiles.add(f)
393 387
394 388
395 389 def filteredhash(repo, maxrev):
396 390 """build hash of filtered revisions in the current repoview.
397 391
398 392 Multiple caches perform up-to-date validation by checking that the
399 393 tiprev and tipnode stored in the cache file match the current repository.
400 394 However, this is not sufficient for validating repoviews because the set
401 395 of revisions in the view may change without the repository tiprev and
402 396 tipnode changing.
403 397
404 398 This function hashes all the revs filtered from the view and returns
405 399 that SHA-1 digest.
406 400 """
407 401 cl = repo.changelog
408 402 if not cl.filteredrevs:
409 403 return None
410 404 key = cl._filteredrevs_hashcache.get(maxrev)
411 405 if not key:
412 406 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
413 407 if revs:
414 408 s = hashutil.sha1()
415 409 for rev in revs:
416 410 s.update(b'%d;' % rev)
417 411 key = s.digest()
418 412 cl._filteredrevs_hashcache[maxrev] = key
419 413 return key
420 414
421 415
422 416 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
423 417 '''yield every hg repository under path, always recursively.
424 418 The recurse flag will only control recursion into repo working dirs'''
425 419
426 420 def errhandler(err):
427 421 if err.filename == path:
428 422 raise err
429 423
430 424 samestat = getattr(os.path, 'samestat', None)
431 425 if followsym and samestat is not None:
432 426
433 427 def adddir(dirlst, dirname):
434 428 dirstat = os.stat(dirname)
435 429 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
436 430 if not match:
437 431 dirlst.append(dirstat)
438 432 return not match
439 433
440 434 else:
441 435 followsym = False
442 436
443 437 if (seen_dirs is None) and followsym:
444 438 seen_dirs = []
445 439 adddir(seen_dirs, path)
446 440 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
447 441 dirs.sort()
448 442 if b'.hg' in dirs:
449 443 yield root # found a repository
450 444 qroot = os.path.join(root, b'.hg', b'patches')
451 445 if os.path.isdir(os.path.join(qroot, b'.hg')):
452 446 yield qroot # we have a patch queue repo here
453 447 if recurse:
454 448 # avoid recursing inside the .hg directory
455 449 dirs.remove(b'.hg')
456 450 else:
457 451 dirs[:] = [] # don't descend further
458 452 elif followsym:
459 453 newdirs = []
460 454 for d in dirs:
461 455 fname = os.path.join(root, d)
462 456 if adddir(seen_dirs, fname):
463 457 if os.path.islink(fname):
464 458 for hgname in walkrepos(fname, True, seen_dirs):
465 459 yield hgname
466 460 else:
467 461 newdirs.append(d)
468 462 dirs[:] = newdirs
469 463
470 464
471 465 def binnode(ctx):
472 466 """Return binary node id for a given basectx"""
473 467 node = ctx.node()
474 468 if node is None:
475 469 return wdirid
476 470 return node
477 471
478 472
479 473 def intrev(ctx):
480 474 """Return integer for a given basectx that can be used in comparison or
481 475 arithmetic operation"""
482 476 rev = ctx.rev()
483 477 if rev is None:
484 478 return wdirrev
485 479 return rev
486 480
487 481
488 482 def formatchangeid(ctx):
489 483 """Format changectx as '{rev}:{node|formatnode}', which is the default
490 484 template provided by logcmdutil.changesettemplater"""
491 485 repo = ctx.repo()
492 486 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
493 487
494 488
495 489 def formatrevnode(ui, rev, node):
496 490 """Format given revision and node depending on the current verbosity"""
497 491 if ui.debugflag:
498 492 hexfunc = hex
499 493 else:
500 494 hexfunc = short
501 495 return b'%d:%s' % (rev, hexfunc(node))
502 496
503 497
504 498 def resolvehexnodeidprefix(repo, prefix):
505 499 if prefix.startswith(b'x'):
506 500 prefix = prefix[1:]
507 501 try:
508 502 # Uses unfiltered repo because it's faster when prefix is ambiguous.
509 503 # This matches the shortesthexnodeidprefix() function below.
510 504 node = repo.unfiltered().changelog._partialmatch(prefix)
511 505 except error.AmbiguousPrefixLookupError:
512 506 revset = repo.ui.config(
513 507 b'experimental', b'revisions.disambiguatewithin'
514 508 )
515 509 if revset:
516 510 # Clear config to avoid infinite recursion
517 511 configoverrides = {
518 512 (b'experimental', b'revisions.disambiguatewithin'): None
519 513 }
520 514 with repo.ui.configoverride(configoverrides):
521 515 revs = repo.anyrevs([revset], user=True)
522 516 matches = []
523 517 for rev in revs:
524 518 node = repo.changelog.node(rev)
525 519 if hex(node).startswith(prefix):
526 520 matches.append(node)
527 521 if len(matches) == 1:
528 522 return matches[0]
529 523 raise
530 524 if node is None:
531 525 return
532 526 repo.changelog.rev(node) # make sure node isn't filtered
533 527 return node
534 528
535 529
536 530 def mayberevnum(repo, prefix):
537 531 """Checks if the given prefix may be mistaken for a revision number"""
538 532 try:
539 533 i = int(prefix)
540 534 # if we are a pure int, then starting with zero will not be
541 535 # confused as a rev; or, obviously, if the int is larger
542 536 # than the value of the tip rev. We still need to disambiguate if
543 537 # prefix == '0', since that *is* a valid revnum.
544 538 if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
545 539 return False
546 540 return True
547 541 except ValueError:
548 542 return False
549 543
550 544
551 545 def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
552 546 """Find the shortest unambiguous prefix that matches hexnode.
553 547
554 548 If "cache" is not None, it must be a dictionary that can be used for
555 549 caching between calls to this method.
556 550 """
557 551 # _partialmatch() of filtered changelog could take O(len(repo)) time,
558 552 # which would be unacceptably slow. So we look for hash collision in
559 553 # unfiltered space, which means some hashes may be slightly longer.
560 554
561 555 minlength = max(minlength, 1)
562 556
563 557 def disambiguate(prefix):
564 558 """Disambiguate against revnums."""
565 559 if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'):
566 560 if mayberevnum(repo, prefix):
567 561 return b'x' + prefix
568 562 else:
569 563 return prefix
570 564
571 565 hexnode = hex(node)
572 566 for length in range(len(prefix), len(hexnode) + 1):
573 567 prefix = hexnode[:length]
574 568 if not mayberevnum(repo, prefix):
575 569 return prefix
576 570
577 571 cl = repo.unfiltered().changelog
578 572 revset = repo.ui.config(b'experimental', b'revisions.disambiguatewithin')
579 573 if revset:
580 574 revs = None
581 575 if cache is not None:
582 576 revs = cache.get(b'disambiguationrevset')
583 577 if revs is None:
584 578 revs = repo.anyrevs([revset], user=True)
585 579 if cache is not None:
586 580 cache[b'disambiguationrevset'] = revs
587 581 if cl.rev(node) in revs:
588 582 hexnode = hex(node)
589 583 nodetree = None
590 584 if cache is not None:
591 585 nodetree = cache.get(b'disambiguationnodetree')
592 586 if not nodetree:
593 587 if util.safehasattr(parsers, 'nodetree'):
594 588 # The CExt is the only implementation to provide a nodetree
595 589 # class so far.
596 590 index = cl.index
597 591 if util.safehasattr(index, 'get_cindex'):
598 592 # the rust wrapped need to give access to its internal index
599 593 index = index.get_cindex()
600 594 nodetree = parsers.nodetree(index, len(revs))
601 595 for r in revs:
602 596 nodetree.insert(r)
603 597 if cache is not None:
604 598 cache[b'disambiguationnodetree'] = nodetree
605 599 if nodetree is not None:
606 600 length = max(nodetree.shortest(node), minlength)
607 601 prefix = hexnode[:length]
608 602 return disambiguate(prefix)
609 603 for length in range(minlength, len(hexnode) + 1):
610 604 matches = []
611 605 prefix = hexnode[:length]
612 606 for rev in revs:
613 607 otherhexnode = repo[rev].hex()
614 608 if prefix == otherhexnode[:length]:
615 609 matches.append(otherhexnode)
616 610 if len(matches) == 1:
617 611 return disambiguate(prefix)
618 612
619 613 try:
620 614 return disambiguate(cl.shortest(node, minlength))
621 615 except error.LookupError:
622 616 raise error.RepoLookupError()
623 617
624 618
625 619 def isrevsymbol(repo, symbol):
626 620 """Checks if a symbol exists in the repo.
627 621
628 622 See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
629 623 symbol is an ambiguous nodeid prefix.
630 624 """
631 625 try:
632 626 revsymbol(repo, symbol)
633 627 return True
634 628 except error.RepoLookupError:
635 629 return False
636 630
637 631
638 632 def revsymbol(repo, symbol):
639 633 """Returns a context given a single revision symbol (as string).
640 634
641 635 This is similar to revsingle(), but accepts only a single revision symbol,
642 636 i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
643 637 not "max(public())".
644 638 """
645 639 if not isinstance(symbol, bytes):
646 640 msg = (
647 641 b"symbol (%s of type %s) was not a string, did you mean "
648 642 b"repo[symbol]?" % (symbol, type(symbol))
649 643 )
650 644 raise error.ProgrammingError(msg)
651 645 try:
652 646 if symbol in (b'.', b'tip', b'null'):
653 647 return repo[symbol]
654 648
655 649 try:
656 650 r = int(symbol)
657 651 if b'%d' % r != symbol:
658 652 raise ValueError
659 653 l = len(repo.changelog)
660 654 if r < 0:
661 655 r += l
662 656 if r < 0 or r >= l and r != wdirrev:
663 657 raise ValueError
664 658 return repo[r]
665 659 except error.FilteredIndexError:
666 660 raise
667 661 except (ValueError, OverflowError, IndexError):
668 662 pass
669 663
670 664 if len(symbol) == 40:
671 665 try:
672 666 node = bin(symbol)
673 667 rev = repo.changelog.rev(node)
674 668 return repo[rev]
675 669 except error.FilteredLookupError:
676 670 raise
677 671 except (TypeError, LookupError):
678 672 pass
679 673
680 674 # look up bookmarks through the name interface
681 675 try:
682 676 node = repo.names.singlenode(repo, symbol)
683 677 rev = repo.changelog.rev(node)
684 678 return repo[rev]
685 679 except KeyError:
686 680 pass
687 681
688 682 node = resolvehexnodeidprefix(repo, symbol)
689 683 if node is not None:
690 684 rev = repo.changelog.rev(node)
691 685 return repo[rev]
692 686
693 687 raise error.RepoLookupError(_(b"unknown revision '%s'") % symbol)
694 688
695 689 except error.WdirUnsupported:
696 690 return repo[None]
697 691 except (
698 692 error.FilteredIndexError,
699 693 error.FilteredLookupError,
700 694 error.FilteredRepoLookupError,
701 695 ):
702 696 raise _filterederror(repo, symbol)
703 697
704 698
705 699 def _filterederror(repo, changeid):
706 700 """build an exception to be raised about a filtered changeid
707 701
708 702 This is extracted in a function to help extensions (eg: evolve) to
709 703 experiment with various message variants."""
710 704 if repo.filtername.startswith(b'visible'):
711 705
712 706 # Check if the changeset is obsolete
713 707 unfilteredrepo = repo.unfiltered()
714 708 ctx = revsymbol(unfilteredrepo, changeid)
715 709
716 710 # If the changeset is obsolete, enrich the message with the reason
717 711 # that made this changeset not visible
718 712 if ctx.obsolete():
719 713 msg = obsutil._getfilteredreason(repo, changeid, ctx)
720 714 else:
721 715 msg = _(b"hidden revision '%s'") % changeid
722 716
723 717 hint = _(b'use --hidden to access hidden revisions')
724 718
725 719 return error.FilteredRepoLookupError(msg, hint=hint)
726 720 msg = _(b"filtered revision '%s' (not in '%s' subset)")
727 721 msg %= (changeid, repo.filtername)
728 722 return error.FilteredRepoLookupError(msg)
729 723
730 724
731 725 def revsingle(repo, revspec, default=b'.', localalias=None):
732 726 if not revspec and revspec != 0:
733 727 return repo[default]
734 728
735 729 l = revrange(repo, [revspec], localalias=localalias)
736 730 if not l:
737 731 raise error.Abort(_(b'empty revision set'))
738 732 return repo[l.last()]
739 733
740 734
741 735 def _pairspec(revspec):
742 736 tree = revsetlang.parse(revspec)
743 737 return tree and tree[0] in (
744 738 b'range',
745 739 b'rangepre',
746 740 b'rangepost',
747 741 b'rangeall',
748 742 )
749 743
750 744
751 745 def revpair(repo, revs):
752 746 if not revs:
753 747 return repo[b'.'], repo[None]
754 748
755 749 l = revrange(repo, revs)
756 750
757 751 if not l:
758 752 raise error.Abort(_(b'empty revision range'))
759 753
760 754 first = l.first()
761 755 second = l.last()
762 756
763 757 if (
764 758 first == second
765 759 and len(revs) >= 2
766 760 and not all(revrange(repo, [r]) for r in revs)
767 761 ):
768 762 raise error.Abort(_(b'empty revision on one side of range'))
769 763
770 764 # if top-level is range expression, the result must always be a pair
771 765 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
772 766 return repo[first], repo[None]
773 767
774 768 return repo[first], repo[second]
775 769
776 770
777 771 def revrange(repo, specs, localalias=None):
778 772 """Execute 1 to many revsets and return the union.
779 773
780 774 This is the preferred mechanism for executing revsets using user-specified
781 775 config options, such as revset aliases.
782 776
783 777 The revsets specified by ``specs`` will be executed via a chained ``OR``
784 778 expression. If ``specs`` is empty, an empty result is returned.
785 779
786 780 ``specs`` can contain integers, in which case they are assumed to be
787 781 revision numbers.
788 782
789 783 It is assumed the revsets are already formatted. If you have arguments
790 784 that need to be expanded in the revset, call ``revsetlang.formatspec()``
791 785 and pass the result as an element of ``specs``.
792 786
793 787 Specifying a single revset is allowed.
794 788
795 789 Returns a ``smartset.abstractsmartset`` which is a list-like interface over
796 790 integer revisions.
797 791 """
798 792 allspecs = []
799 793 for spec in specs:
800 794 if isinstance(spec, int):
801 795 spec = revsetlang.formatspec(b'%d', spec)
802 796 allspecs.append(spec)
803 797 return repo.anyrevs(allspecs, user=True, localalias=localalias)
804 798
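# A hedged usage sketch for revrange(); the specs are hypothetical.
# Integer specs are formatted into revsets automatically, and all specs
# are OR-ed into a single smartset of revision numbers:
#
#     revs = revrange(repo, [b'heads(default)', 42])
#     for rev in revs:
#         print(rev)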
805 799
806 800 def increasingwindows(windowsize=8, sizelimit=512):
807 801 while True:
808 802 yield windowsize
809 803 if windowsize < sizelimit:
810 804 windowsize *= 2
811 805
812 806
813 807 def walkchangerevs(repo, revs, makefilematcher, prepare):
814 808 '''Iterate over files and the revs in a "windowed" way.
815 809
816 810 Callers most commonly need to iterate backwards over the history
817 811 in which they are interested. Doing so has awful (quadratic-looking)
818 812 performance, so we use iterators in a "windowed" way.
819 813
820 814 We walk a window of revisions in the desired order. Within the
821 815 window, we first walk forwards to gather data, then in the desired
822 816 order (usually backwards) to display it.
823 817
824 818 This function returns an iterator yielding contexts. Before
825 819 yielding each context, the iterator will first call the prepare
826 820 function on each context in the window in forward order.'''
827 821
828 822 if not revs:
829 823 return []
830 824 change = repo.__getitem__
831 825
832 826 def iterate():
833 827 it = iter(revs)
834 828 stopiteration = False
835 829 for windowsize in increasingwindows():
836 830 nrevs = []
837 831 for i in pycompat.xrange(windowsize):
838 832 rev = next(it, None)
839 833 if rev is None:
840 834 stopiteration = True
841 835 break
842 836 nrevs.append(rev)
843 837 for rev in sorted(nrevs):
844 838 ctx = change(rev)
845 839 prepare(ctx, makefilematcher(ctx))
846 840 for rev in nrevs:
847 841 yield change(rev)
848 842
849 843 if stopiteration:
850 844 break
851 845
852 846 return iterate()
853 847
854 848
855 849 def meaningfulparents(repo, ctx):
856 850 """Return list of meaningful (or all if debug) parentrevs for rev.
857 851
858 852 For merges (two non-nullrev revisions) both parents are meaningful.
859 853 Otherwise the first parent revision is considered meaningful if it
860 854 is not the preceding revision.
861 855 """
862 856 parents = ctx.parents()
863 857 if len(parents) > 1:
864 858 return parents
865 859 if repo.ui.debugflag:
866 860 return [parents[0], repo[nullrev]]
867 861 if parents[0].rev() >= intrev(ctx) - 1:
868 862 return []
869 863 return parents
870 864
871 865
872 866 def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
873 867 """Return a function that produces paths for presenting to the user.
874 868
875 869 The returned function takes a repo-relative path and produces a path
876 870 that can be presented in the UI.
877 871
878 872 Depending on the value of ui.relative-paths, either a repo-relative or
879 873 cwd-relative path will be produced.
880 874
881 875 legacyrelativevalue is the value to use if ui.relative-paths=legacy
882 876
883 877 If forcerelativevalue is not None, then that value will be used regardless
884 878 of what ui.relative-paths is set to.
885 879 """
886 880 if forcerelativevalue is not None:
887 881 relative = forcerelativevalue
888 882 else:
889 883 config = repo.ui.config(b'ui', b'relative-paths')
890 884 if config == b'legacy':
891 885 relative = legacyrelativevalue
892 886 else:
893 887 relative = stringutil.parsebool(config)
894 888 if relative is None:
895 889 raise error.ConfigError(
896 890 _(b"ui.relative-paths is not a boolean ('%s')") % config
897 891 )
898 892
899 893 if relative:
900 894 cwd = repo.getcwd()
901 895 if cwd != b'':
902 896 # this branch would work even if cwd == b'' (ie cwd = repo
903 897 # root), but its generality makes the returned function slower
904 898 pathto = repo.pathto
905 899 return lambda f: pathto(f, cwd)
906 900 if repo.ui.configbool(b'ui', b'slash'):
907 901 return lambda f: f
908 902 else:
909 903 return util.localpath
910 904
911 905
912 906 def subdiruipathfn(subpath, uipathfn):
913 907 '''Create a new uipathfn that treats the file as relative to subpath.'''
914 908 return lambda f: uipathfn(posixpath.join(subpath, f))
915 909
916 910
917 911 def anypats(pats, opts):
918 912 '''Checks if any patterns, including --include and --exclude were given.
919 913
920 914 Some commands (e.g. addremove) use this condition for deciding whether to
921 915 print absolute or relative paths.
922 916 '''
923 917 return bool(pats or opts.get(b'include') or opts.get(b'exclude'))
924 918
925 919
926 920 def expandpats(pats):
927 921 '''Expand bare globs when running on windows.
928 922 On posix we assume it has already been done by sh.'''
929 923 if not util.expandglobs:
930 924 return list(pats)
931 925 ret = []
932 926 for kindpat in pats:
933 927 kind, pat = matchmod._patsplit(kindpat, None)
934 928 if kind is None:
935 929 try:
936 930 globbed = glob.glob(pat)
937 931 except re.error:
938 932 globbed = [pat]
939 933 if globbed:
940 934 ret.extend(globbed)
941 935 continue
942 936 ret.append(kindpat)
943 937 return ret
944 938
945 939
946 940 def matchandpats(
947 941 ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
948 942 ):
949 943 '''Return a matcher and the patterns that were used.
950 944 The matcher will warn about bad matches, unless an alternate badfn callback
951 945 is provided.'''
952 946 if opts is None:
953 947 opts = {}
954 948 if not globbed and default == b'relpath':
955 949 pats = expandpats(pats or [])
956 950
957 951 uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)
958 952
959 953 def bad(f, msg):
960 954 ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))
961 955
962 956 if badfn is None:
963 957 badfn = bad
964 958
965 959 m = ctx.match(
966 960 pats,
967 961 opts.get(b'include'),
968 962 opts.get(b'exclude'),
969 963 default,
970 964 listsubrepos=opts.get(b'subrepos'),
971 965 badfn=badfn,
972 966 )
973 967
974 968 if m.always():
975 969 pats = []
976 970 return m, pats
977 971
978 972
979 973 def match(
980 974 ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
981 975 ):
982 976 '''Return a matcher that will warn about bad matches.'''
983 977 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
984 978
985 979
986 980 def matchall(repo):
987 981 '''Return a matcher that will efficiently match everything.'''
988 982 return matchmod.always()
989 983
990 984
991 985 def matchfiles(repo, files, badfn=None):
992 986 '''Return a matcher that will efficiently match exactly these files.'''
993 987 return matchmod.exact(files, badfn=badfn)
994 988
995 989
996 990 def parsefollowlinespattern(repo, rev, pat, msg):
997 991 """Return a file name from `pat` pattern suitable for usage in followlines
998 992 logic.
999 993 """
1000 994 if not matchmod.patkind(pat):
1001 995 return pathutil.canonpath(repo.root, repo.getcwd(), pat)
1002 996 else:
1003 997 ctx = repo[rev]
1004 998 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
1005 999 files = [f for f in ctx if m(f)]
1006 1000 if len(files) != 1:
1007 1001 raise error.ParseError(msg)
1008 1002 return files[0]
1009 1003
1010 1004
1011 1005 def getorigvfs(ui, repo):
1012 1006 """return a vfs suitable to save 'orig' file
1013 1007
1014 1008 return None if no special directory is configured"""
1015 1009 origbackuppath = ui.config(b'ui', b'origbackuppath')
1016 1010 if not origbackuppath:
1017 1011 return None
1018 1012 return vfs.vfs(repo.wvfs.join(origbackuppath))
1019 1013
1020 1014
1021 1015 def backuppath(ui, repo, filepath):
1022 1016 '''customize where working copy backup files (.orig files) are created
1023 1017
1024 1018 Fetch user defined path from config file: [ui] origbackuppath = <path>
1025 1019 Fall back to default (filepath with .orig suffix) if not specified
1026 1020
1027 1021 filepath is repo-relative
1028 1022
1029 1023 Returns an absolute path
1030 1024 '''
1031 1025 origvfs = getorigvfs(ui, repo)
1032 1026 if origvfs is None:
1033 1027 return repo.wjoin(filepath + b".orig")
1034 1028
1035 1029 origbackupdir = origvfs.dirname(filepath)
1036 1030 if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
1037 1031 ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))
1038 1032
1039 1033 # Remove any files that conflict with the backup file's path
1040 1034 for f in reversed(list(pathutil.finddirs(filepath))):
1041 1035 if origvfs.isfileorlink(f):
1042 1036 ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(f))
1043 1037 origvfs.unlink(f)
1044 1038 break
1045 1039
1046 1040 origvfs.makedirs(origbackupdir)
1047 1041
1048 1042 if origvfs.isdir(filepath) and not origvfs.islink(filepath):
1049 1043 ui.note(
1050 1044 _(b'removing conflicting directory: %s\n') % origvfs.join(filepath)
1051 1045 )
1052 1046 origvfs.rmtree(filepath, forcibly=True)
1053 1047
1054 1048 return origvfs.join(filepath)
1055 1049
1056 1050
1057 1051 class _containsnode(object):
1058 1052 """proxy __contains__(node) to container.__contains__ which accepts revs"""
1059 1053
1060 1054 def __init__(self, repo, revcontainer):
1061 1055 self._torev = repo.changelog.rev
1062 1056 self._revcontains = revcontainer.__contains__
1063 1057
1064 1058 def __contains__(self, node):
1065 1059 return self._revcontains(self._torev(node))
1066 1060
1067 1061
1068 1062 def cleanupnodes(
1069 1063 repo,
1070 1064 replacements,
1071 1065 operation,
1072 1066 moves=None,
1073 1067 metadata=None,
1074 1068 fixphase=False,
1075 1069 targetphase=None,
1076 1070 backup=True,
1077 1071 ):
1078 1072 """do common cleanups when old nodes are replaced by new nodes
1079 1073
1080 1074 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
1081 1075 (we might also want to move working directory parent in the future)
1082 1076
1083 1077 By default, bookmark moves are calculated automatically from 'replacements',
1084 1078 but 'moves' can be used to override that. Also, 'moves' may include
1085 1079 additional bookmark moves that should not have associated obsmarkers.
1086 1080
1087 1081 replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
1088 1082 have replacements. operation is a string, like "rebase".
1089 1083
1090 1084 metadata is a dictionary containing metadata to be stored in obsmarker if
1091 1085 obsolescence is enabled.
1092 1086 """
1093 1087 assert fixphase or targetphase is None
1094 1088 if not replacements and not moves:
1095 1089 return
1096 1090
1097 1091 # translate mapping's other forms
1098 1092 if not util.safehasattr(replacements, b'items'):
1099 1093 replacements = {(n,): () for n in replacements}
1100 1094 else:
1101 1095 # upgrading non-tuple "source" to tuple ones for BC
1102 1096 repls = {}
1103 1097 for key, value in replacements.items():
1104 1098 if not isinstance(key, tuple):
1105 1099 key = (key,)
1106 1100 repls[key] = value
1107 1101 replacements = repls
1108 1102
1109 1103 # Unfiltered repo is needed since nodes in replacements might be hidden.
1110 1104 unfi = repo.unfiltered()
1111 1105
1112 1106 # Calculate bookmark movements
1113 1107 if moves is None:
1114 1108 moves = {}
1115 1109 for oldnodes, newnodes in replacements.items():
1116 1110 for oldnode in oldnodes:
1117 1111 if oldnode in moves:
1118 1112 continue
1119 1113 if len(newnodes) > 1:
1120 1114 # usually a split, take the one with the biggest rev number
1121 1115 newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
1122 1116 elif len(newnodes) == 0:
1123 1117 # move bookmark backwards
1124 1118 allreplaced = []
1125 1119 for rep in replacements:
1126 1120 allreplaced.extend(rep)
1127 1121 roots = list(
1128 1122 unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
1129 1123 )
1130 1124 if roots:
1131 1125 newnode = roots[0].node()
1132 1126 else:
1133 1127 newnode = nullid
1134 1128 else:
1135 1129 newnode = newnodes[0]
1136 1130 moves[oldnode] = newnode
1137 1131
1138 1132 allnewnodes = [n for ns in replacements.values() for n in ns]
1139 1133 toretract = {}
1140 1134 toadvance = {}
1141 1135 if fixphase:
1142 1136 precursors = {}
1143 1137 for oldnodes, newnodes in replacements.items():
1144 1138 for oldnode in oldnodes:
1145 1139 for newnode in newnodes:
1146 1140 precursors.setdefault(newnode, []).append(oldnode)
1147 1141
1148 1142 allnewnodes.sort(key=lambda n: unfi[n].rev())
1149 1143 newphases = {}
1150 1144
1151 1145 def phase(ctx):
1152 1146 return newphases.get(ctx.node(), ctx.phase())
1153 1147
1154 1148 for newnode in allnewnodes:
1155 1149 ctx = unfi[newnode]
1156 1150 parentphase = max(phase(p) for p in ctx.parents())
1157 1151 if targetphase is None:
1158 1152 oldphase = max(
1159 1153 unfi[oldnode].phase() for oldnode in precursors[newnode]
1160 1154 )
1161 1155 newphase = max(oldphase, parentphase)
1162 1156 else:
1163 1157 newphase = max(targetphase, parentphase)
1164 1158 newphases[newnode] = newphase
1165 1159 if newphase > ctx.phase():
1166 1160 toretract.setdefault(newphase, []).append(newnode)
1167 1161 elif newphase < ctx.phase():
1168 1162 toadvance.setdefault(newphase, []).append(newnode)
1169 1163
1170 1164 with repo.transaction(b'cleanup') as tr:
1171 1165 # Move bookmarks
1172 1166 bmarks = repo._bookmarks
1173 1167 bmarkchanges = []
1174 1168 for oldnode, newnode in moves.items():
1175 1169 oldbmarks = repo.nodebookmarks(oldnode)
1176 1170 if not oldbmarks:
1177 1171 continue
1178 1172 from . import bookmarks # avoid import cycle
1179 1173
1180 1174 repo.ui.debug(
1181 1175 b'moving bookmarks %r from %s to %s\n'
1182 1176 % (
1183 1177 pycompat.rapply(pycompat.maybebytestr, oldbmarks),
1184 1178 hex(oldnode),
1185 1179 hex(newnode),
1186 1180 )
1187 1181 )
1188 1182 # Delete divergent bookmarks being parents of related newnodes
1189 1183 deleterevs = repo.revs(
1190 1184 b'parents(roots(%ln & (::%n))) - parents(%n)',
1191 1185 allnewnodes,
1192 1186 newnode,
1193 1187 oldnode,
1194 1188 )
1195 1189 deletenodes = _containsnode(repo, deleterevs)
1196 1190 for name in oldbmarks:
1197 1191 bmarkchanges.append((name, newnode))
1198 1192 for b in bookmarks.divergent2delete(repo, deletenodes, name):
1199 1193 bmarkchanges.append((b, None))
1200 1194
1201 1195 if bmarkchanges:
1202 1196 bmarks.applychanges(repo, tr, bmarkchanges)
1203 1197
1204 1198 for phase, nodes in toretract.items():
1205 1199 phases.retractboundary(repo, tr, phase, nodes)
1206 1200 for phase, nodes in toadvance.items():
1207 1201 phases.advanceboundary(repo, tr, phase, nodes)
1208 1202
1209 1203 mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
1210 1204 # Obsolete or strip nodes
1211 1205 if obsolete.isenabled(repo, obsolete.createmarkersopt):
1212 1206 # If a node is already obsoleted, and we want to obsolete it
1213 1207 # without a successor, skip that obsolete request since it's
1214 1208 # unnecessary. That's the "if s or not isobs(n)" check below.
1215 1209 # Also sort the nodes in topological order, which might be useful for
1216 1210 # some obsstore logic.
1217 1211 # NOTE: the sorting might belong to createmarkers.
1218 1212 torev = unfi.changelog.rev
1219 1213 sortfunc = lambda ns: torev(ns[0][0])
1220 1214 rels = []
1221 1215 for ns, s in sorted(replacements.items(), key=sortfunc):
1222 1216 rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
1223 1217 rels.append(rel)
1224 1218 if rels:
1225 1219 obsolete.createmarkers(
1226 1220 repo, rels, operation=operation, metadata=metadata
1227 1221 )
1228 1222 elif phases.supportinternal(repo) and mayusearchived:
1229 1223 # this assumes we do not have "unstable" nodes above the cleaned ones
1230 1224 allreplaced = set()
1231 1225 for ns in replacements.keys():
1232 1226 allreplaced.update(ns)
1233 1227 if backup:
1234 1228 from . import repair # avoid import cycle
1235 1229
1236 1230 node = min(allreplaced, key=repo.changelog.rev)
1237 1231 repair.backupbundle(
1238 1232 repo, allreplaced, allreplaced, node, operation
1239 1233 )
1240 1234 phases.retractboundary(repo, tr, phases.archived, allreplaced)
1241 1235 else:
1242 1236 from . import repair # avoid import cycle
1243 1237
1244 1238 tostrip = list(n for ns in replacements for n in ns)
1245 1239 if tostrip:
1246 1240 repair.delayedstrip(
1247 1241 repo.ui, repo, tostrip, operation, backup=backup
1248 1242 )
1249 1243
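# A minimal usage sketch for cleanupnodes (not part of the original file;
# `old` and `new` are assumed to be changectxs produced by some
# history-rewriting operation, with the caller holding the usual locks):
#
#     from mercurial import scmutil
#     scmutil.cleanupnodes(repo, {old.node(): [new.node()]}, b'my-rewrite')
#
# An empty successor list, e.g. {old.node(): []}, records a prune: any
# bookmark on the old node is moved back to its closest surviving ancestor.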
1250 1244
1251 1245 def addremove(repo, matcher, prefix, uipathfn, opts=None):
1252 1246 if opts is None:
1253 1247 opts = {}
1254 1248 m = matcher
1255 1249 dry_run = opts.get(b'dry_run')
1256 1250 try:
1257 1251 similarity = float(opts.get(b'similarity') or 0)
1258 1252 except ValueError:
1259 1253 raise error.Abort(_(b'similarity must be a number'))
1260 1254 if similarity < 0 or similarity > 100:
1261 1255 raise error.Abort(_(b'similarity must be between 0 and 100'))
1262 1256 similarity /= 100.0
1263 1257
1264 1258 ret = 0
1265 1259
1266 1260 wctx = repo[None]
1267 1261 for subpath in sorted(wctx.substate):
1268 1262 submatch = matchmod.subdirmatcher(subpath, m)
1269 1263 if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
1270 1264 sub = wctx.sub(subpath)
1271 1265 subprefix = repo.wvfs.reljoin(prefix, subpath)
1272 1266 subuipathfn = subdiruipathfn(subpath, uipathfn)
1273 1267 try:
1274 1268 if sub.addremove(submatch, subprefix, subuipathfn, opts):
1275 1269 ret = 1
1276 1270 except error.LookupError:
1277 1271 repo.ui.status(
1278 1272 _(b"skipping missing subrepository: %s\n")
1279 1273 % uipathfn(subpath)
1280 1274 )
1281 1275
1282 1276 rejected = []
1283 1277
1284 1278 def badfn(f, msg):
1285 1279 if f in m.files():
1286 1280 m.bad(f, msg)
1287 1281 rejected.append(f)
1288 1282
1289 1283 badmatch = matchmod.badmatch(m, badfn)
1290 1284 added, unknown, deleted, removed, forgotten = _interestingfiles(
1291 1285 repo, badmatch
1292 1286 )
1293 1287
1294 1288 unknownset = set(unknown + forgotten)
1295 1289 toprint = unknownset.copy()
1296 1290 toprint.update(deleted)
1297 1291 for abs in sorted(toprint):
1298 1292 if repo.ui.verbose or not m.exact(abs):
1299 1293 if abs in unknownset:
1300 1294 status = _(b'adding %s\n') % uipathfn(abs)
1301 1295 label = b'ui.addremove.added'
1302 1296 else:
1303 1297 status = _(b'removing %s\n') % uipathfn(abs)
1304 1298 label = b'ui.addremove.removed'
1305 1299 repo.ui.status(status, label=label)
1306 1300
1307 1301 renames = _findrenames(
1308 1302 repo, m, added + unknown, removed + deleted, similarity, uipathfn
1309 1303 )
1310 1304
1311 1305 if not dry_run:
1312 1306 _markchanges(repo, unknown + forgotten, deleted, renames)
1313 1307
1314 1308 for f in rejected:
1315 1309 if f in m.files():
1316 1310 return 1
1317 1311 return ret
1318 1312
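# A hedged sketch of driving addremove the way the `hg addremove` command
# does; the opts shape is inferred from the code above, and the similarity
# value is passed pre-parsed for simplicity:
#
#     from mercurial import match as matchmod, scmutil
#     m = matchmod.always()
#     uipathfn = scmutil.getuipathfn(repo)
#     ret = scmutil.addremove(repo, m, b'', uipathfn,
#                             opts={b'similarity': 75})
#
# With a non-zero similarity, removals are recorded as renames whenever an
# added file is at least that percent similar to a removed one.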
1319 1313
1320 1314 def marktouched(repo, files, similarity=0.0):
1321 1315 '''Assert that files have somehow been operated upon. Files are relative to
1322 1316 the repo root.'''
1323 1317 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
1324 1318 rejected = []
1325 1319
1326 1320 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
1327 1321
1328 1322 if repo.ui.verbose:
1329 1323 unknownset = set(unknown + forgotten)
1330 1324 toprint = unknownset.copy()
1331 1325 toprint.update(deleted)
1332 1326 for abs in sorted(toprint):
1333 1327 if abs in unknownset:
1334 1328 status = _(b'adding %s\n') % abs
1335 1329 else:
1336 1330 status = _(b'removing %s\n') % abs
1337 1331 repo.ui.status(status)
1338 1332
1339 1333 # TODO: We should probably have the caller pass in uipathfn and apply it to
1340 1334 # the messages above too. legacyrelativevalue=True is consistent with how
1341 1335 # it used to work.
1342 1336 uipathfn = getuipathfn(repo, legacyrelativevalue=True)
1343 1337 renames = _findrenames(
1344 1338 repo, m, added + unknown, removed + deleted, similarity, uipathfn
1345 1339 )
1346 1340
1347 1341 _markchanges(repo, unknown + forgotten, deleted, renames)
1348 1342
1349 1343 for f in rejected:
1350 1344 if f in m.files():
1351 1345 return 1
1352 1346 return 0
1353 1347
1354 1348
1355 1349 def _interestingfiles(repo, matcher):
1356 1350 '''Walk dirstate with matcher, looking for files that addremove would care
1357 1351 about.
1358 1352
1359 1353 This is different from dirstate.status because it doesn't care about
1360 1354 whether files are modified or clean.'''
1361 1355 added, unknown, deleted, removed, forgotten = [], [], [], [], []
1362 1356 audit_path = pathutil.pathauditor(repo.root, cached=True)
1363 1357
1364 1358 ctx = repo[None]
1365 1359 dirstate = repo.dirstate
1366 1360 matcher = repo.narrowmatch(matcher, includeexact=True)
1367 1361 walkresults = dirstate.walk(
1368 1362 matcher,
1369 1363 subrepos=sorted(ctx.substate),
1370 1364 unknown=True,
1371 1365 ignored=False,
1372 1366 full=False,
1373 1367 )
1374 1368 for abs, st in pycompat.iteritems(walkresults):
1375 1369 dstate = dirstate[abs]
1376 1370 if dstate == b'?' and audit_path.check(abs):
1377 1371 unknown.append(abs)
1378 1372 elif dstate != b'r' and not st:
1379 1373 deleted.append(abs)
1380 1374 elif dstate == b'r' and st:
1381 1375 forgotten.append(abs)
1382 1376 # for finding renames
1383 1377 elif dstate == b'r' and not st:
1384 1378 removed.append(abs)
1385 1379 elif dstate == b'a':
1386 1380 added.append(abs)
1387 1381
1388 1382 return added, unknown, deleted, removed, forgotten
1389 1383
1390 1384
1391 1385 def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
1392 1386 '''Find renames from removed files to added ones.'''
1393 1387 renames = {}
1394 1388 if similarity > 0:
1395 1389 for old, new, score in similar.findrenames(
1396 1390 repo, added, removed, similarity
1397 1391 ):
1398 1392 if (
1399 1393 repo.ui.verbose
1400 1394 or not matcher.exact(old)
1401 1395 or not matcher.exact(new)
1402 1396 ):
1403 1397 repo.ui.status(
1404 1398 _(
1405 1399 b'recording removal of %s as rename to %s '
1406 1400 b'(%d%% similar)\n'
1407 1401 )
1408 1402 % (uipathfn(old), uipathfn(new), score * 100)
1409 1403 )
1410 1404 renames[new] = old
1411 1405 return renames
1412 1406
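# Illustrative call (file names made up): with similarity 0.8, a removed
# a.txt and an added b.txt sharing at least 80% of their content come back
# as a {new: old} mapping:
#
#     renames = _findrenames(repo, matcher, [b'b.txt'], [b'a.txt'],
#                            0.8, uipathfn)
#     # -> {b'b.txt': b'a.txt'} when similar.findrenames scores them >= 0.8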
1413 1407
1414 1408 def _markchanges(repo, unknown, deleted, renames):
1415 1409 '''Marks the files in unknown as added, the files in deleted as removed,
1416 1410 and the files in renames as copied.'''
1417 1411 wctx = repo[None]
1418 1412 with repo.wlock():
1419 1413 wctx.forget(deleted)
1420 1414 wctx.add(unknown)
1421 1415 for new, old in pycompat.iteritems(renames):
1422 1416 wctx.copy(old, new)
1423 1417
1424 1418
1425 1419 def getrenamedfn(repo, endrev=None):
1426 1420 if copiesmod.usechangesetcentricalgo(repo):
1427 1421
1428 1422 def getrenamed(fn, rev):
1429 1423 ctx = repo[rev]
1430 1424 p1copies = ctx.p1copies()
1431 1425 if fn in p1copies:
1432 1426 return p1copies[fn]
1433 1427 p2copies = ctx.p2copies()
1434 1428 if fn in p2copies:
1435 1429 return p2copies[fn]
1436 1430 return None
1437 1431
1438 1432 return getrenamed
1439 1433
1440 1434 rcache = {}
1441 1435 if endrev is None:
1442 1436 endrev = len(repo)
1443 1437
1444 1438 def getrenamed(fn, rev):
1445 1439 '''looks up all renames for a file (up to endrev) the first
1446 1440 time the file is given. It indexes on the changerev and only
1447 1441 parses the manifest if linkrev != changerev.
1448 1442 Returns rename info for fn at changerev rev.'''
1449 1443 if fn not in rcache:
1450 1444 rcache[fn] = {}
1451 1445 fl = repo.file(fn)
1452 1446 for i in fl:
1453 1447 lr = fl.linkrev(i)
1454 1448 renamed = fl.renamed(fl.node(i))
1455 1449 rcache[fn][lr] = renamed and renamed[0]
1456 1450 if lr >= endrev:
1457 1451 break
1458 1452 if rev in rcache[fn]:
1459 1453 return rcache[fn][rev]
1460 1454
1461 1455 # If linkrev != rev (i.e. rev not found in rcache) fallback to
1462 1456 # filectx logic.
1463 1457 try:
1464 1458 return repo[rev][fn].copysource()
1465 1459 except error.LookupError:
1466 1460 return None
1467 1461
1468 1462 return getrenamed
1469 1463
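# Sketch of the returned callable (the file name is hypothetical): it
# reports the copy source of a file in a given revision, or None:
#
#     getrenamed = getrenamedfn(repo)
#     source = getrenamed(b'new-name.txt', repo[b'tip'].rev())
#     # source is the old path (bytes) if the file was copied/renamed there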
1470 1464
1471 1465 def getcopiesfn(repo, endrev=None):
1472 1466 if copiesmod.usechangesetcentricalgo(repo):
1473 1467
1474 1468 def copiesfn(ctx):
1475 1469 if ctx.p2copies():
1476 1470 allcopies = ctx.p1copies().copy()
1477 1471 # There should be no overlap
1478 1472 allcopies.update(ctx.p2copies())
1479 1473 return sorted(allcopies.items())
1480 1474 else:
1481 1475 return sorted(ctx.p1copies().items())
1482 1476
1483 1477 else:
1484 1478 getrenamed = getrenamedfn(repo, endrev)
1485 1479
1486 1480 def copiesfn(ctx):
1487 1481 copies = []
1488 1482 for fn in ctx.files():
1489 1483 rename = getrenamed(fn, ctx.rev())
1490 1484 if rename:
1491 1485 copies.append((fn, rename))
1492 1486 return copies
1493 1487
1494 1488 return copiesfn
1495 1489
1496 1490
1497 1491 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1498 1492 """Update the dirstate to reflect the intent of copying src to dst. For
1499 1493 different reasons it might not end with dst being marked as copied from src.
1500 1494 """
1501 1495 origsrc = repo.dirstate.copied(src) or src
1502 1496 if dst == origsrc: # copying back a copy?
1503 1497 if repo.dirstate[dst] not in b'mn' and not dryrun:
1504 1498 repo.dirstate.normallookup(dst)
1505 1499 else:
1506 1500 if repo.dirstate[origsrc] == b'a' and origsrc == src:
1507 1501 if not ui.quiet:
1508 1502 ui.warn(
1509 1503 _(
1510 1504 b"%s has not been committed yet, so no copy "
1511 1505 b"data will be stored for %s.\n"
1512 1506 )
1513 1507 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
1514 1508 )
1515 1509 if repo.dirstate[dst] in b'?r' and not dryrun:
1516 1510 wctx.add([dst])
1517 1511 elif not dryrun:
1518 1512 wctx.copy(origsrc, dst)
1519 1513
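# A hedged usage sketch, mirroring what `hg copy` does per file (paths are
# repo-relative bytes; the caller is assumed to hold the wlock):
#
#     with repo.wlock():
#         wctx = repo[None]
#         dirstatecopy(repo.ui, repo, wctx, b'src.txt', b'dst.txt',
#                      dryrun=False, cwd=repo.getcwd())
#
# Copying an uncommitted add (dirstate 'a') only warns, since no copy data
# can be stored until the source is committed.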
1520 1514
1521 1515 def movedirstate(repo, newctx, match=None):
1522 1516 """Move the dirstate to newctx and adjust it as necessary.
1523 1517
1524 1518 A matcher can be provided as an optimization. It is probably a bug to pass
1525 1519 a matcher that doesn't match all the differences between the parent of the
1526 1520 working copy and newctx.
1527 1521 """
1528 1522 oldctx = repo[b'.']
1529 1523 ds = repo.dirstate
1530 1524 copies = dict(ds.copies())
1531 1525 ds.setparents(newctx.node(), nullid)
1532 1526 s = newctx.status(oldctx, match=match)
1533 1527 for f in s.modified:
1534 1528 if ds[f] == b'r':
1535 1529 # modified + removed -> removed
1536 1530 continue
1537 1531 ds.normallookup(f)
1538 1532
1539 1533 for f in s.added:
1540 1534 if ds[f] == b'r':
1541 1535 # added + removed -> unknown
1542 1536 ds.drop(f)
1543 1537 elif ds[f] != b'a':
1544 1538 ds.add(f)
1545 1539
1546 1540 for f in s.removed:
1547 1541 if ds[f] == b'a':
1548 1542 # removed + added -> normal
1549 1543 ds.normallookup(f)
1550 1544 elif ds[f] != b'r':
1551 1545 ds.remove(f)
1552 1546
1553 1547 # Merge old parent and old working dir copies
1554 1548 oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
1555 1549 oldcopies.update(copies)
1556 1550 copies = {
1557 1551 dst: oldcopies.get(src, src)
1558 1552 for dst, src in pycompat.iteritems(oldcopies)
1559 1553 }
1560 1554 # Adjust the dirstate copies
1561 1555 for dst, src in pycompat.iteritems(copies):
1562 1556 if src not in newctx or dst in newctx or ds[dst] != b'a':
1563 1557 src = None
1564 1558 ds.copy(src, dst)
1565 1559 repo._quick_access_changeid_invalidate()
1566 1560
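# A minimal sketch (assumed calling convention, as used by history-rewriting
# commands): after computing `newctx` without touching working directory
# files, repoint the dirstate at it:
#
#     with repo.wlock(), repo.dirstate.parentchange():
#         movedirstate(repo, newctx)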
1567 1561
1568 1562 def filterrequirements(requirements):
1569 1563 """ filters the requirements into two sets:
1570 1564
1571 1565 wcreq: requirements which should be written in .hg/requires
1572 1566 storereq: which should be written in .hg/store/requires
1573 1567
1574 1568 Returns (wcreq, storereq)
1575 1569 """
1576 1570 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
1577 1571 wc, store = set(), set()
1578 1572 for r in requirements:
1579 1573 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
1580 1574 wc.add(r)
1581 1575 else:
1582 1576 store.add(r)
1583 1577 return wc, store
1584 1578 return requirements, None
1585 1579
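# Worked example; the exact requirement constants are assumptions based on
# the requirementsmod names used above:
#
#     reqs = {requirementsmod.SHARESAFE_REQUIREMENT, b'store', b'revlogv1'}
#     wc, store = filterrequirements(reqs)
#     # wc holds the WORKING_DIR_REQUIREMENTS subset, store the rest.
#     # Without the share-safe requirement the result is (reqs, None) and
#     # everything lands in .hg/requires.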
1586 1580
1587 1581 def istreemanifest(repo):
1588 1582 """ returns whether the repository is using treemanifest or not """
1589 1583 return requirementsmod.TREEMANIFEST_REQUIREMENT in repo.requirements
1590 1584
1591 1585
1592 1586 def writereporequirements(repo, requirements=None):
1593 1587 """ writes requirements for the repo to .hg/requires """
1594 1588 if requirements:
1595 1589 repo.requirements = requirements
1596 1590 wcreq, storereq = filterrequirements(repo.requirements)
1597 1591 if wcreq is not None:
1598 1592 writerequires(repo.vfs, wcreq)
1599 1593 if storereq is not None:
1600 1594 writerequires(repo.svfs, storereq)
1601 1595
1602 1596
1603 1597 def writerequires(opener, requirements):
1604 1598 with opener(b'requires', b'w', atomictemp=True) as fp:
1605 1599 for r in sorted(requirements):
1606 1600 fp.write(b"%s\n" % r)
1607 1601
1608 1602
1609 1603 class filecachesubentry(object):
1610 1604 def __init__(self, path, stat):
1611 1605 self.path = path
1612 1606 self.cachestat = None
1613 1607 self._cacheable = None
1614 1608
1615 1609 if stat:
1616 1610 self.cachestat = filecachesubentry.stat(self.path)
1617 1611
1618 1612 if self.cachestat:
1619 1613 self._cacheable = self.cachestat.cacheable()
1620 1614 else:
1621 1615 # None means we don't know yet
1622 1616 self._cacheable = None
1623 1617
1624 1618 def refresh(self):
1625 1619 if self.cacheable():
1626 1620 self.cachestat = filecachesubentry.stat(self.path)
1627 1621
1628 1622 def cacheable(self):
1629 1623 if self._cacheable is not None:
1630 1624 return self._cacheable
1631 1625
1632 1626 # we don't know yet, assume it is for now
1633 1627 return True
1634 1628
1635 1629 def changed(self):
1636 1630 # no point in going further if we can't cache it
1637 1631 if not self.cacheable():
1638 1632 return True
1639 1633
1640 1634 newstat = filecachesubentry.stat(self.path)
1641 1635
1642 1636 # we may not know if it's cacheable yet, check again now
1643 1637 if newstat and self._cacheable is None:
1644 1638 self._cacheable = newstat.cacheable()
1645 1639
1646 1640 # check again
1647 1641 if not self._cacheable:
1648 1642 return True
1649 1643
1650 1644 if self.cachestat != newstat:
1651 1645 self.cachestat = newstat
1652 1646 return True
1653 1647 else:
1654 1648 return False
1655 1649
1656 1650 @staticmethod
1657 1651 def stat(path):
1658 1652 try:
1659 1653 return util.cachestat(path)
1660 1654 except OSError as e:
1661 1655 if e.errno != errno.ENOENT:
1662 1656 raise
1663 1657
1664 1658
1665 1659 class filecacheentry(object):
1666 1660 def __init__(self, paths, stat=True):
1667 1661 self._entries = []
1668 1662 for path in paths:
1669 1663 self._entries.append(filecachesubentry(path, stat))
1670 1664
1671 1665 def changed(self):
1672 1666 '''true if any entry has changed'''
1673 1667 for entry in self._entries:
1674 1668 if entry.changed():
1675 1669 return True
1676 1670 return False
1677 1671
1678 1672 def refresh(self):
1679 1673 for entry in self._entries:
1680 1674 entry.refresh()
1681 1675
1682 1676
1683 1677 class filecache(object):
1684 1678 """A property like decorator that tracks files under .hg/ for updates.
1685 1679
1686 1680 On first access, the files defined as arguments are stat()ed and the
1687 1681 results cached. The decorated function is called. The results are stashed
1688 1682 away in a ``_filecache`` dict on the object whose method is decorated.
1689 1683
1690 1684 On subsequent access, the cached result is used as it is set to the
1691 1685 instance dictionary.
1692 1686
1693 1687 On external property set/delete operations, the caller must update the
1694 1688 corresponding _filecache entry appropriately. Use __class__.<attr>.set()
1695 1689 instead of directly setting <attr>.
1696 1690
1697 1691 When using the property API, the cached data is always used if available.
1698 1692 No stat() is performed to check if the file has changed.
1699 1693
1700 1694 Others can muck about with the state of the ``_filecache`` dict. e.g. they
1701 1695 can populate an entry before the property's getter is called. In this case,
1702 1696 entries in ``_filecache`` will be used during property operations,
1703 1697 if available. If the underlying file changes, it is up to external callers
1704 1698 to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
1705 1699 method result as well as possibly calling ``del obj._filecache[attr]`` to
1706 1700 remove the ``filecacheentry``.
1707 1701 """
1708 1702
1709 1703 def __init__(self, *paths):
1710 1704 self.paths = paths
1711 1705
1712 1706 def join(self, obj, fname):
1713 1707 """Used to compute the runtime path of a cached file.
1714 1708
1715 1709 Users should subclass filecache and provide their own version of this
1716 1710 function to call the appropriate join function on 'obj' (an instance
1717 1711 of the class that its member function was decorated).
1718 1712 """
1719 1713 raise NotImplementedError
1720 1714
1721 1715 def __call__(self, func):
1722 1716 self.func = func
1723 1717 self.sname = func.__name__
1724 1718 self.name = pycompat.sysbytes(self.sname)
1725 1719 return self
1726 1720
1727 1721 def __get__(self, obj, type=None):
1728 1722 # if accessed on the class, return the descriptor itself.
1729 1723 if obj is None:
1730 1724 return self
1731 1725
1732 1726 assert self.sname not in obj.__dict__
1733 1727
1734 1728 entry = obj._filecache.get(self.name)
1735 1729
1736 1730 if entry:
1737 1731 if entry.changed():
1738 1732 entry.obj = self.func(obj)
1739 1733 else:
1740 1734 paths = [self.join(obj, path) for path in self.paths]
1741 1735
1742 1736 # We stat -before- creating the object so our cache doesn't lie if
1743 1737 # a writer modified the file between the time we read and stat it
1744 1738 entry = filecacheentry(paths, True)
1745 1739 entry.obj = self.func(obj)
1746 1740
1747 1741 obj._filecache[self.name] = entry
1748 1742
1749 1743 obj.__dict__[self.sname] = entry.obj
1750 1744 return entry.obj
1751 1745
1752 1746 # don't implement __set__(), which would make __dict__ lookup as slow as
1753 1747 # function call.
1754 1748
1755 1749 def set(self, obj, value):
1756 1750 if self.name not in obj._filecache:
1757 1751 # we add an entry for the missing value because X in __dict__
1758 1752 # implies X in _filecache
1759 1753 paths = [self.join(obj, path) for path in self.paths]
1760 1754 ce = filecacheentry(paths, False)
1761 1755 obj._filecache[self.name] = ce
1762 1756 else:
1763 1757 ce = obj._filecache[self.name]
1764 1758
1765 1759 ce.obj = value # update cached copy
1766 1760 obj.__dict__[self.sname] = value # update copy returned by obj.x
1767 1761
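# A minimal subclass sketch following the docstring above, modeled on how
# localrepository uses this decorator (elided and simplified):
#
#     class repofilecache(filecache):
#         """filecache for files in .hg/"""
#         def join(self, obj, fname):
#             return obj.vfs.join(fname)     # obj is assumed to carry a vfs
#
#     class localrepository(object):         # heavily elided
#         @repofilecache(b'bookmarks')
#         def _bookmarks(self):
#             return bookmarks.bmstore(self) # re-read when the file changes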
1768 1762
1769 1763 def extdatasource(repo, source):
1770 1764 """Gather a map of rev -> value dict from the specified source
1771 1765
1772 1766 A source spec is treated as a URL, with a special case shell: type
1773 1767 for parsing the output from a shell command.
1774 1768
1775 1769 The data is parsed as a series of newline-separated records where
1776 1770 each record is a revision specifier optionally followed by a space
1777 1771 and a freeform string value. If the revision is known locally, it
1778 1772 is converted to a rev, otherwise the record is skipped.
1779 1773
1780 1774 Note that both key and value are treated as UTF-8 and converted to
1781 1775 the local encoding. This allows uniformity between local and
1782 1776 remote data sources.
1783 1777 """
1784 1778
1785 1779 spec = repo.ui.config(b"extdata", source)
1786 1780 if not spec:
1787 1781 raise error.Abort(_(b"unknown extdata source '%s'") % source)
1788 1782
1789 1783 data = {}
1790 1784 src = proc = None
1791 1785 try:
1792 1786 if spec.startswith(b"shell:"):
1793 1787 # external commands should be run relative to the repo root
1794 1788 cmd = spec[6:]
1795 1789 proc = subprocess.Popen(
1796 1790 procutil.tonativestr(cmd),
1797 1791 shell=True,
1798 1792 bufsize=-1,
1799 1793 close_fds=procutil.closefds,
1800 1794 stdout=subprocess.PIPE,
1801 1795 cwd=procutil.tonativestr(repo.root),
1802 1796 )
1803 1797 src = proc.stdout
1804 1798 else:
1805 1799 # treat as a URL or file
1806 1800 src = url.open(repo.ui, spec)
1807 1801 for l in src:
1808 1802 if b" " in l:
1809 1803 k, v = l.strip().split(b" ", 1)
1810 1804 else:
1811 1805 k, v = l.strip(), b""
1812 1806
1813 1807 k = encoding.tolocal(k)
1814 1808 try:
1815 1809 data[revsingle(repo, k).rev()] = encoding.tolocal(v)
1816 1810 except (error.LookupError, error.RepoLookupError):
1817 1811 pass # we ignore data for nodes that don't exist locally
1818 1812 finally:
1819 1813 if proc:
1820 1814 try:
1821 1815 proc.communicate()
1822 1816 except ValueError:
1823 1817 # This happens if we started iterating src and then
1824 1818 # get a parse error on a line. It should be safe to ignore.
1825 1819 pass
1826 1820 if src:
1827 1821 src.close()
1828 1822 if proc and proc.returncode != 0:
1829 1823 raise error.Abort(
1830 1824 _(b"extdata command '%s' failed: %s")
1831 1825 % (cmd, procutil.explainexit(proc.returncode))
1832 1826 )
1833 1827
1834 1828 return data
1835 1829
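# Configuration sketch (hgrc); the source name and command are made up for
# illustration:
#
#     [extdata]
#     grades = shell:cat grades.txt
#
# extdatasource(repo, b'grades') then runs the command from the repo root
# and turns each "revspec value" output line into a {rev: value} entry,
# silently skipping revisions unknown to the local repo.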
1836 1830
1837 1831 class progress(object):
1838 1832 def __init__(self, ui, updatebar, topic, unit=b"", total=None):
1839 1833 self.ui = ui
1840 1834 self.pos = 0
1841 1835 self.topic = topic
1842 1836 self.unit = unit
1843 1837 self.total = total
1844 1838 self.debug = ui.configbool(b'progress', b'debug')
1845 1839 self._updatebar = updatebar
1846 1840
1847 1841 def __enter__(self):
1848 1842 return self
1849 1843
1850 1844 def __exit__(self, exc_type, exc_value, exc_tb):
1851 1845 self.complete()
1852 1846
1853 1847 def update(self, pos, item=b"", total=None):
1854 1848 assert pos is not None
1855 1849 if total:
1856 1850 self.total = total
1857 1851 self.pos = pos
1858 1852 self._updatebar(self.topic, self.pos, item, self.unit, self.total)
1859 1853 if self.debug:
1860 1854 self._printdebug(item)
1861 1855
1862 1856 def increment(self, step=1, item=b"", total=None):
1863 1857 self.update(self.pos + step, item, total)
1864 1858
1865 1859 def complete(self):
1866 1860 self.pos = None
1867 1861 self.unit = b""
1868 1862 self.total = None
1869 1863 self._updatebar(self.topic, self.pos, b"", self.unit, self.total)
1870 1864
1871 1865 def _printdebug(self, item):
1872 1866 unit = b''
1873 1867 if self.unit:
1874 1868 unit = b' ' + self.unit
1875 1869 if item:
1876 1870 item = b' ' + item
1877 1871
1878 1872 if self.total:
1879 1873 pct = 100.0 * self.pos / self.total
1880 1874 self.ui.debug(
1881 1875 b'%s:%s %d/%d%s (%4.2f%%)\n'
1882 1876 % (self.topic, item, self.pos, self.total, unit, pct)
1883 1877 )
1884 1878 else:
1885 1879 self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit))
1886 1880
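# Typical use goes through ui.makeprogress(), which instantiates this class
# with the appropriate update callback (topic and unit are arbitrary here):
#
#     with repo.ui.makeprogress(b'scanning', unit=b'files',
#                               total=len(files)) as prog:
#         for f in files:
#             prog.increment(item=f)
#
# Exiting the context calls complete(), which clears the bar by reporting
# pos=None.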
1887 1881
1888 1882 def gdinitconfig(ui):
1889 1883 """helper function to know if a repo should be created as general delta
1890 1884 """
1891 1885 # experimental config: format.generaldelta
1892 1886 return ui.configbool(b'format', b'generaldelta') or ui.configbool(
1893 1887 b'format', b'usegeneraldelta'
1894 1888 )
1895 1889
1896 1890
1897 1891 def gddeltaconfig(ui):
1898 1892 """helper function to know if incoming delta should be optimised
1899 1893 """
1900 1894 # experimental config: format.generaldelta
1901 1895 return ui.configbool(b'format', b'generaldelta')
1902 1896
1903 1897
1904 1898 class simplekeyvaluefile(object):
1905 1899 """A simple file with key=value lines
1906 1900
1907 1901 Keys must be alphanumeric and start with a letter; values must not
1908 1902 contain '\n' characters"""
1909 1903
1910 1904 firstlinekey = b'__firstline'
1911 1905
1912 1906 def __init__(self, vfs, path, keys=None):
1913 1907 self.vfs = vfs
1914 1908 self.path = path
1915 1909
1916 1910 def read(self, firstlinenonkeyval=False):
1917 1911 """Read the contents of a simple key-value file
1918 1912
1919 1913 'firstlinenonkeyval' indicates whether the first line of the file should
1920 1914 be treated as a key-value pair or returned fully under the
1921 1915 __firstline key."""
1922 1916 lines = self.vfs.readlines(self.path)
1923 1917 d = {}
1924 1918 if firstlinenonkeyval:
1925 1919 if not lines:
1926 1920 e = _(b"empty simplekeyvalue file")
1927 1921 raise error.CorruptedState(e)
1928 1922 # we don't want to include '\n' in the __firstline
1929 1923 d[self.firstlinekey] = lines[0][:-1]
1930 1924 del lines[0]
1931 1925
1932 1926 try:
1933 1927 # the 'if line.strip()' part prevents us from failing on empty
1934 1928 # lines which only contain '\n' therefore are not skipped
1935 1929 # by 'if line'
1936 1930 updatedict = dict(
1937 1931 line[:-1].split(b'=', 1) for line in lines if line.strip()
1938 1932 )
1939 1933 if self.firstlinekey in updatedict:
1940 1934 e = _(b"%r can't be used as a key")
1941 1935 raise error.CorruptedState(e % self.firstlinekey)
1942 1936 d.update(updatedict)
1943 1937 except ValueError as e:
1944 1938 raise error.CorruptedState(stringutil.forcebytestr(e))
1945 1939 return d
1946 1940
1947 1941 def write(self, data, firstline=None):
1948 1942 """Write key=>value mapping to a file
1949 1943 data is a dict. Keys must be alphanumeric and start with a letter.
1950 1944 Values must not contain newline characters.
1951 1945
1952 1946 If 'firstline' is not None, it is written to file before
1953 1947 everything else, as it is, not in a key=value form"""
1954 1948 lines = []
1955 1949 if firstline is not None:
1956 1950 lines.append(b'%s\n' % firstline)
1957 1951
1958 1952 for k, v in data.items():
1959 1953 if k == self.firstlinekey:
1960 1954 e = b"key name '%s' is reserved" % self.firstlinekey
1961 1955 raise error.ProgrammingError(e)
1962 1956 if not k[0:1].isalpha():
1963 1957 e = b"keys must start with a letter in a key-value file"
1964 1958 raise error.ProgrammingError(e)
1965 1959 if not k.isalnum():
1966 1960 e = b"invalid key name in a simple key-value file"
1967 1961 raise error.ProgrammingError(e)
1968 1962 if b'\n' in v:
1969 1963 e = b"invalid value in a simple key-value file"
1970 1964 raise error.ProgrammingError(e)
1971 1965 lines.append(b"%s=%s\n" % (k, v))
1972 1966 with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp:
1973 1967 fp.write(b''.join(lines))
1974 1968
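# Round-trip sketch (the state file name is arbitrary):
#
#     kvfile = simplekeyvaluefile(repo.vfs, b'myextstate')
#     kvfile.write({b'version': b'1'}, firstline=b'human-readable header')
#     state = kvfile.read(firstlinenonkeyval=True)
#     # state == {b'__firstline': b'human-readable header', b'version': b'1'}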
1975 1969
1976 1970 _reportobsoletedsource = [
1977 1971 b'debugobsolete',
1978 1972 b'pull',
1979 1973 b'push',
1980 1974 b'serve',
1981 1975 b'unbundle',
1982 1976 ]
1983 1977
1984 1978 _reportnewcssource = [
1985 1979 b'pull',
1986 1980 b'unbundle',
1987 1981 ]
1988 1982
1989 1983
1990 1984 def prefetchfiles(repo, revmatches):
1991 1985 """Invokes the registered file prefetch functions, allowing extensions to
1992 1986 ensure the corresponding files are available locally, before the command
1993 1987 uses them.
1994 1988
1995 1989 Args:
1996 1990 revmatches: a list of (revision, match) tuples to indicate the files to
1997 1991 fetch at each revision. If any of the match elements is None, it matches
1998 1992 all files.
1999 1993 """
2000 1994
2001 1995 def _matcher(m):
2002 1996 if m:
2003 1997 assert isinstance(m, matchmod.basematcher)
2004 1998 # The command itself will complain about files that don't exist, so
2005 1999 # don't duplicate the message.
2006 2000 return matchmod.badmatch(m, lambda fn, msg: None)
2007 2001 else:
2008 2002 return matchall(repo)
2009 2003
2010 2004 revbadmatches = [(rev, _matcher(match)) for (rev, match) in revmatches]
2011 2005
2012 2006 fileprefetchhooks(repo, revbadmatches)
2013 2007
2014 2008
2015 2009 # a list of (repo, revmatches) prefetch functions
2016 2010 fileprefetchhooks = util.hooks()
2017 2011
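# Extensions register prefetch functions on the hook point above; a hedged
# sketch (the fetch helper is hypothetical):
#
#     def _prefetch(repo, revmatches):
#         for rev, match in revmatches:
#             fetch_files_somehow(repo, rev, match)  # hypothetical helper
#
#     scmutil.fileprefetchhooks.add('myext', _prefetch)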
2018 2012 # A marker that tells the evolve extension to suppress its own reporting
2019 2013 _reportstroubledchangesets = True
2020 2014
2021 2015
2022 2016 def registersummarycallback(repo, otr, txnname=b'', as_validator=False):
2023 2017 """register a callback to issue a summary after the transaction is closed
2024 2018
2025 2019 If as_validator is true, then the callbacks are registered as transaction
2026 2020 validators instead
2027 2021 """
2028 2022
2029 2023 def txmatch(sources):
2030 2024 return any(txnname.startswith(source) for source in sources)
2031 2025
2032 2026 categories = []
2033 2027
2034 2028 def reportsummary(func):
2035 2029 """decorator for report callbacks."""
2036 2030 # The repoview life cycle is shorter than that of the actual
2037 2031 # underlying repository. So the filtered object can die before the
2038 2032 # weakref is used, leading to trouble. We keep a reference to the
2039 2033 # unfiltered object and restore the filtering when retrieving the
2040 2034 # repository through the weakref.
2041 2035 filtername = repo.filtername
2042 2036 reporef = weakref.ref(repo.unfiltered())
2043 2037
2044 2038 def wrapped(tr):
2045 2039 repo = reporef()
2046 2040 if filtername:
2047 2041 assert repo is not None # help pytype
2048 2042 repo = repo.filtered(filtername)
2049 2043 func(repo, tr)
2050 2044
2051 2045 newcat = b'%02i-txnreport' % len(categories)
2052 2046 if as_validator:
2053 2047 otr.addvalidator(newcat, wrapped)
2054 2048 else:
2055 2049 otr.addpostclose(newcat, wrapped)
2056 2050 categories.append(newcat)
2057 2051 return wrapped
2058 2052
2059 2053 @reportsummary
2060 2054 def reportchangegroup(repo, tr):
2061 2055 cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
2062 2056 cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
2063 2057 cgfiles = tr.changes.get(b'changegroup-count-files', 0)
2064 2058 cgheads = tr.changes.get(b'changegroup-count-heads', 0)
2065 2059 if cgchangesets or cgrevisions or cgfiles:
2066 2060 htext = b""
2067 2061 if cgheads:
2068 2062 htext = _(b" (%+d heads)") % cgheads
2069 2063 msg = _(b"added %d changesets with %d changes to %d files%s\n")
2070 2064 if as_validator:
2071 2065 msg = _(b"adding %d changesets with %d changes to %d files%s\n")
2072 2066 assert repo is not None # help pytype
2073 2067 repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))
2074 2068
2075 2069 if txmatch(_reportobsoletedsource):
2076 2070
2077 2071 @reportsummary
2078 2072 def reportobsoleted(repo, tr):
2079 2073 obsoleted = obsutil.getobsoleted(repo, tr)
2080 2074 newmarkers = len(tr.changes.get(b'obsmarkers', ()))
2081 2075 if newmarkers:
2082 2076 repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
2083 2077 if obsoleted:
2084 2078 msg = _(b'obsoleted %i changesets\n')
2085 2079 if as_validator:
2086 2080 msg = _(b'obsoleting %i changesets\n')
2087 2081 repo.ui.status(msg % len(obsoleted))
2088 2082
2089 2083 if obsolete.isenabled(
2090 2084 repo, obsolete.createmarkersopt
2091 2085 ) and repo.ui.configbool(
2092 2086 b'experimental', b'evolution.report-instabilities'
2093 2087 ):
2094 2088 instabilitytypes = [
2095 2089 (b'orphan', b'orphan'),
2096 2090 (b'phase-divergent', b'phasedivergent'),
2097 2091 (b'content-divergent', b'contentdivergent'),
2098 2092 ]
2099 2093
2100 2094 def getinstabilitycounts(repo):
2101 2095 filtered = repo.changelog.filteredrevs
2102 2096 counts = {}
2103 2097 for instability, revset in instabilitytypes:
2104 2098 counts[instability] = len(
2105 2099 set(obsolete.getrevs(repo, revset)) - filtered
2106 2100 )
2107 2101 return counts
2108 2102
2109 2103 oldinstabilitycounts = getinstabilitycounts(repo)
2110 2104
2111 2105 @reportsummary
2112 2106 def reportnewinstabilities(repo, tr):
2113 2107 newinstabilitycounts = getinstabilitycounts(repo)
2114 2108 for instability, revset in instabilitytypes:
2115 2109 delta = (
2116 2110 newinstabilitycounts[instability]
2117 2111 - oldinstabilitycounts[instability]
2118 2112 )
2119 2113 msg = getinstabilitymessage(delta, instability)
2120 2114 if msg:
2121 2115 repo.ui.warn(msg)
2122 2116
2123 2117 if txmatch(_reportnewcssource):
2124 2118
2125 2119 @reportsummary
2126 2120 def reportnewcs(repo, tr):
2127 2121 """Report the range of new revisions pulled/unbundled."""
2128 2122 origrepolen = tr.changes.get(b'origrepolen', len(repo))
2129 2123 unfi = repo.unfiltered()
2130 2124 if origrepolen >= len(unfi):
2131 2125 return
2132 2126
2133 2127 # Compute the bounds of new visible revisions' range.
2134 2128 revs = smartset.spanset(repo, start=origrepolen)
2135 2129 if revs:
2136 2130 minrev, maxrev = repo[revs.min()], repo[revs.max()]
2137 2131
2138 2132 if minrev == maxrev:
2139 2133 revrange = minrev
2140 2134 else:
2141 2135 revrange = b'%s:%s' % (minrev, maxrev)
2142 2136 draft = len(repo.revs(b'%ld and draft()', revs))
2143 2137 secret = len(repo.revs(b'%ld and secret()', revs))
2144 2138 if not (draft or secret):
2145 2139 msg = _(b'new changesets %s\n') % revrange
2146 2140 elif draft and secret:
2147 2141 msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
2148 2142 msg %= (revrange, draft, secret)
2149 2143 elif draft:
2150 2144 msg = _(b'new changesets %s (%d drafts)\n')
2151 2145 msg %= (revrange, draft)
2152 2146 elif secret:
2153 2147 msg = _(b'new changesets %s (%d secrets)\n')
2154 2148 msg %= (revrange, secret)
2155 2149 else:
2156 2150 errormsg = b'entered unreachable condition'
2157 2151 raise error.ProgrammingError(errormsg)
2158 2152 repo.ui.status(msg)
2159 2153
2160 2154 # search new changesets directly pulled as obsolete
2161 2155 duplicates = tr.changes.get(b'revduplicates', ())
2162 2156 obsadded = unfi.revs(
2163 2157 b'(%d: + %ld) and obsolete()', origrepolen, duplicates
2164 2158 )
2165 2159 cl = repo.changelog
2166 2160 extinctadded = [r for r in obsadded if r not in cl]
2167 2161 if extinctadded:
2168 2162 # They are not just obsolete, but obsolete and invisible
2169 2163 # we call them "extinct" internally but the terms have not been
2170 2164 # exposed to users.
2171 2165 msg = b'(%d other changesets obsolete on arrival)\n'
2172 2166 repo.ui.status(msg % len(extinctadded))
2173 2167
2174 2168 @reportsummary
2175 2169 def reportphasechanges(repo, tr):
2176 2170 """Report statistics of phase changes for changesets pre-existing
2177 2171 pull/unbundle.
2178 2172 """
2179 2173 origrepolen = tr.changes.get(b'origrepolen', len(repo))
2180 2174 published = []
2181 2175 for revs, (old, new) in tr.changes.get(b'phases', []):
2182 2176 if new != phases.public:
2183 2177 continue
2184 2178 published.extend(rev for rev in revs if rev < origrepolen)
2185 2179 if not published:
2186 2180 return
2187 2181 msg = _(b'%d local changesets published\n')
2188 2182 if as_validator:
2189 2183 msg = _(b'%d local changesets will be published\n')
2190 2184 repo.ui.status(msg % len(published))
2191 2185
2192 2186
2193 2187 def getinstabilitymessage(delta, instability):
2194 2188 """function to return the message to show warning about new instabilities
2195 2189
2196 2190 exists as a separate function so that extensions can wrap it to show more
2197 2191 information like how to fix instabilities"""
2198 2192 if delta > 0:
2199 2193 return _(b'%i new %s changesets\n') % (delta, instability)
2200 2194
2201 2195
2202 2196 def nodesummaries(repo, nodes, maxnumnodes=4):
2203 2197 if len(nodes) <= maxnumnodes or repo.ui.verbose:
2204 2198 return b' '.join(short(h) for h in nodes)
2205 2199 first = b' '.join(short(h) for h in nodes[:maxnumnodes])
2206 2200 return _(b"%s and %d others") % (first, len(nodes) - maxnumnodes)
2207 2201
2208 2202
2209 2203 def enforcesinglehead(repo, tr, desc, accountclosed=False):
2210 2204 """check that no named branch has multiple heads"""
2211 2205 if desc in (b'strip', b'repair'):
2212 2206 # skip the logic during strip
2213 2207 return
2214 2208 visible = repo.filtered(b'visible')
2215 2209 # possible improvement: we could restrict the check to the affected branches
2216 2210 bm = visible.branchmap()
2217 2211 for name in bm:
2218 2212 heads = bm.branchheads(name, closed=accountclosed)
2219 2213 if len(heads) > 1:
2220 2214 msg = _(b'rejecting multiple heads on branch "%s"')
2221 2215 msg %= name
2222 2216 hint = _(b'%d heads: %s')
2223 2217 hint %= (len(heads), nodesummaries(repo, heads))
2224 2218 raise error.Abort(msg, hint=hint)
2225 2219
2226 2220
2227 2221 def wrapconvertsink(sink):
2228 2222 """Allow extensions to wrap the sink returned by convcmd.convertsink()
2229 2223 before it is used, whether or not the convert extension was formally loaded.
2230 2224 """
2231 2225 return sink
2232 2226
2233 2227
2234 2228 def unhidehashlikerevs(repo, specs, hiddentype):
2235 2229 """parse the user specs and unhide changesets whose hash or revision number
2236 2230 is passed.
2237 2231
2238 2232 hiddentype can be: 1) 'warn': warn while unhiding changesets
2239 2233 2) 'nowarn': don't warn while unhiding changesets
2240 2234
2241 2235 returns a repo object with the required changesets unhidden
2242 2236 """
2243 2237 if not repo.filtername or not repo.ui.configbool(
2244 2238 b'experimental', b'directaccess'
2245 2239 ):
2246 2240 return repo
2247 2241
2248 2242 if repo.filtername not in (b'visible', b'visible-hidden'):
2249 2243 return repo
2250 2244
2251 2245 symbols = set()
2252 2246 for spec in specs:
2253 2247 try:
2254 2248 tree = revsetlang.parse(spec)
2255 2249 except error.ParseError: # will be reported by scmutil.revrange()
2256 2250 continue
2257 2251
2258 2252 symbols.update(revsetlang.gethashlikesymbols(tree))
2259 2253
2260 2254 if not symbols:
2261 2255 return repo
2262 2256
2263 2257 revs = _getrevsfromsymbols(repo, symbols)
2264 2258
2265 2259 if not revs:
2266 2260 return repo
2267 2261
2268 2262 if hiddentype == b'warn':
2269 2263 unfi = repo.unfiltered()
2270 2264 revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in revs])
2271 2265 repo.ui.warn(
2272 2266 _(
2273 2267 b"warning: accessing hidden changesets for write "
2274 2268 b"operation: %s\n"
2275 2269 )
2276 2270 % revstr
2277 2271 )
2278 2272
2279 2273 # we have to use a new filtername to separate the branch/tags caches until
2280 2274 # we can disable these caches when revisions are dynamically pinned.
2281 2275 return repo.filtered(b'visible-hidden', revs)
2282 2276
2283 2277
2284 2278 def _getrevsfromsymbols(repo, symbols):
2285 2279 """parse the list of symbols and returns a set of revision numbers of hidden
2286 2280 changesets present in symbols"""
2287 2281 revs = set()
2288 2282 unfi = repo.unfiltered()
2289 2283 unficl = unfi.changelog
2290 2284 cl = repo.changelog
2291 2285 tiprev = len(unficl)
2292 2286 allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
2293 2287 for s in symbols:
2294 2288 try:
2295 2289 n = int(s)
2296 2290 if n <= tiprev:
2297 2291 if not allowrevnums:
2298 2292 continue
2299 2293 else:
2300 2294 if n not in cl:
2301 2295 revs.add(n)
2302 2296 continue
2303 2297 except ValueError:
2304 2298 pass
2305 2299
2306 2300 try:
2307 2301 s = resolvehexnodeidprefix(unfi, s)
2308 2302 except (error.LookupError, error.WdirUnsupported):
2309 2303 s = None
2310 2304
2311 2305 if s is not None:
2312 2306 rev = unficl.rev(s)
2313 2307 if rev not in cl:
2314 2308 revs.add(rev)
2315 2309
2316 2310 return revs
2317 2311
2318 2312
2319 2313 def bookmarkrevs(repo, mark):
2320 2314 """
2321 2315 Select revisions reachable by a given bookmark
2322 2316 """
2323 2317 return repo.revs(
2324 2318 b"ancestors(bookmark(%s)) - "
2325 2319 b"ancestors(head() and not bookmark(%s)) - "
2326 2320 b"ancestors(bookmark() and not bookmark(%s))",
2327 2321 mark,
2328 2322 mark,
2329 2323 mark,
2330 2324 )
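# The revset above, restated on the command line for a hypothetical bookmark
# named "feature":
#
#     hg log -r "ancestors(bookmark(feature))
#                - ancestors(head() and not bookmark(feature))
#                - ancestors(bookmark() and not bookmark(feature))"
#
# i.e. the changesets reachable only through the given bookmark.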