worker: raise exception instead of calling sys.exit() with child's code...
Martin von Zweigbergk
r46429:8f07f5a9 default
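
In effect, a failed worker child no longer takes the parent down via a bare sys.exit(child_status): the worker machinery raises the new error.WorkerError carrying the child's status code, and the handler added to scmutil.callcatch() turns it back into the process exit code on the way out. A minimal sketch of the new flow, assuming a hypothetical _report_child_failure() helper (the worker.py hunk itself is not shown on this page):

    from mercurial import error

    def _report_child_failure(status):
        # Hypothetical illustration: rather than sys.exit(status), raise a
        # typed exception holding the child's status so the parent unwinds
        # normally and callcatch() can translate it into an exit code.
        if status != 0:
            raise error.WorkerError(status)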
diff --git a/mercurial/error.py b/mercurial/error.py
--- a/mercurial/error.py
+++ b/mercurial/error.py
@@ -1,491 +1,498 @@
 # error.py - Mercurial exceptions
 #
 # Copyright 2005-2008 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 """Mercurial exceptions.
 
 This allows us to catch exceptions at higher levels without forcing
 imports.
 """
 
 from __future__ import absolute_import
 
 # Do not import anything but pycompat here, please
 from . import pycompat
 
 
 def _tobytes(exc):
     """Byte-stringify exception in the same way as BaseException_str()"""
     if not exc.args:
         return b''
     if len(exc.args) == 1:
         return pycompat.bytestr(exc.args[0])
     return b'(%s)' % b', '.join(b"'%s'" % pycompat.bytestr(a) for a in exc.args)
 
 
 class Hint(object):
     """Mix-in to provide a hint of an error
 
     This should come first in the inheritance list to consume a hint and
     pass remaining arguments to the exception class.
     """
 
     def __init__(self, *args, **kw):
         self.hint = kw.pop('hint', None)
         super(Hint, self).__init__(*args, **kw)
 
 
 class StorageError(Hint, Exception):
     """Raised when an error occurs in a storage layer.
 
     Usually subclassed by a storage-specific exception.
     """
 
     __bytes__ = _tobytes
 
 
 class RevlogError(StorageError):
     pass
 
 
 class SidedataHashError(RevlogError):
     def __init__(self, key, expected, got):
         self.sidedatakey = key
         self.expecteddigest = expected
         self.actualdigest = got
 
 
 class FilteredIndexError(IndexError):
     __bytes__ = _tobytes
 
 
 class LookupError(RevlogError, KeyError):
     def __init__(self, name, index, message):
         self.name = name
         self.index = index
         # this can't be called 'message' because at least some installs of
         # Python 2.6+ complain about the 'message' property being deprecated
         self.lookupmessage = message
         if isinstance(name, bytes) and len(name) == 20:
             from .node import short
 
             name = short(name)
         # if name is a binary node, it can be None
         RevlogError.__init__(
             self, b'%s@%s: %s' % (index, pycompat.bytestr(name), message)
         )
 
     def __bytes__(self):
         return RevlogError.__bytes__(self)
 
     def __str__(self):
         return RevlogError.__str__(self)
 
 
 class AmbiguousPrefixLookupError(LookupError):
     pass
 
 
 class FilteredLookupError(LookupError):
     pass
 
 
 class ManifestLookupError(LookupError):
     pass
 
 
 class CommandError(Exception):
     """Exception raised on errors in parsing the command line."""
 
     def __init__(self, command, message):
         self.command = command
         self.message = message
         super(CommandError, self).__init__()
 
     __bytes__ = _tobytes
 
 
 class UnknownCommand(Exception):
     """Exception raised if command is not in the command table."""
 
     def __init__(self, command, all_commands=None):
         self.command = command
         self.all_commands = all_commands
         super(UnknownCommand, self).__init__()
 
     __bytes__ = _tobytes
 
 
 class AmbiguousCommand(Exception):
     """Exception raised if command shortcut matches more than one command."""
 
     def __init__(self, prefix, matches):
         self.prefix = prefix
         self.matches = matches
         super(AmbiguousCommand, self).__init__()
 
     __bytes__ = _tobytes
 
 
+class WorkerError(Exception):
+    """Exception raised when a worker process dies."""
+
+    def __init__(self, status_code):
+        self.status_code = status_code
+
+
 class InterventionRequired(Hint, Exception):
     """Exception raised when a command requires human intervention."""
 
     __bytes__ = _tobytes
 
 
 class ConflictResolutionRequired(InterventionRequired):
     """Exception raised when a continuable command required merge conflict resolution."""
 
     def __init__(self, opname):
         from .i18n import _
 
         self.opname = opname
         InterventionRequired.__init__(
             self,
             _(
                 b"unresolved conflicts (see 'hg resolve', then 'hg %s --continue')"
             )
             % opname,
         )
 
 
 class Abort(Hint, Exception):
     """Raised if a command needs to print an error and exit."""
 
     def __init__(self, message, hint=None):
         self.message = message
         self.hint = hint
         # Pass the message into the Exception constructor to help extensions
         # that look for exc.args[0].
         Exception.__init__(self, message)
 
     def __bytes__(self):
         return self.message
 
     if pycompat.ispy3:
 
         def __str__(self):
             # the output would be unreadable if the message was translated,
             # but do not replace it with encoding.strfromlocal(), which
             # may raise another exception.
             return pycompat.sysstr(self.__bytes__())
 
 
 class HookLoadError(Abort):
     """raised when loading a hook fails, aborting an operation
 
     Exists to allow more specialized catching."""
 
 
 class HookAbort(Abort):
     """raised when a validation hook fails, aborting an operation
 
     Exists to allow more specialized catching."""
 
 
 class ConfigError(Abort):
     """Exception raised when parsing config files"""
 
 
 class UpdateAbort(Abort):
     """Raised when an update is aborted for destination issue"""
 
 
 class MergeDestAbort(Abort):
     """Raised when an update is aborted for destination issues"""
 
 
 class NoMergeDestAbort(MergeDestAbort):
     """Raised when an update is aborted because there is nothing to merge"""
 
 
 class ManyMergeDestAbort(MergeDestAbort):
     """Raised when an update is aborted because destination is ambiguous"""
 
 
 class ResponseExpected(Abort):
     """Raised when an EOF is received for a prompt"""
 
     def __init__(self):
         from .i18n import _
 
         Abort.__init__(self, _(b'response expected'))
 
 
 class OutOfBandError(Hint, Exception):
     """Exception raised when a remote repo reports failure"""
 
     __bytes__ = _tobytes
 
 
 class ParseError(Hint, Exception):
     """Raised when parsing config files and {rev,file}sets (msg[, pos])"""
 
     def __init__(self, message, location=None, hint=None):
         self.message = message
         self.location = location
         self.hint = hint
         # Pass the message and possibly location into the Exception constructor
         # to help code that looks for exc.args.
         if location is not None:
             Exception.__init__(self, message, location)
         else:
             Exception.__init__(self, message)
 
     __bytes__ = _tobytes
 
 
 class PatchError(Exception):
     __bytes__ = _tobytes
 
 
 class UnknownIdentifier(ParseError):
     """Exception raised when a {rev,file}set references an unknown identifier"""
 
     def __init__(self, function, symbols):
         from .i18n import _
 
         ParseError.__init__(self, _(b"unknown identifier: %s") % function)
         self.function = function
         self.symbols = symbols
 
 
 class RepoError(Hint, Exception):
     __bytes__ = _tobytes
 
 
 class RepoLookupError(RepoError):
     pass
 
 
 class FilteredRepoLookupError(RepoLookupError):
     pass
 
 
 class CapabilityError(RepoError):
     pass
 
 
 class RequirementError(RepoError):
     """Exception raised if .hg/requires has an unknown entry."""
 
 
 class StdioError(IOError):
     """Raised if I/O to stdout or stderr fails"""
 
     def __init__(self, err):
         IOError.__init__(self, err.errno, err.strerror)
 
     # no __bytes__() because error message is derived from the standard IOError
 
 
 class UnsupportedMergeRecords(Abort):
     def __init__(self, recordtypes):
         from .i18n import _
 
         self.recordtypes = sorted(recordtypes)
         s = b' '.join(self.recordtypes)
         Abort.__init__(
             self,
             _(b'unsupported merge state records: %s') % s,
             hint=_(
                 b'see https://mercurial-scm.org/wiki/MergeStateRecords for '
                 b'more information'
             ),
         )
 
 
 class UnknownVersion(Abort):
     """generic exception for aborting from an encounter with an unknown version
     """
 
     def __init__(self, msg, hint=None, version=None):
         self.version = version
         super(UnknownVersion, self).__init__(msg, hint=hint)
 
 
 class LockError(IOError):
     def __init__(self, errno, strerror, filename, desc):
         IOError.__init__(self, errno, strerror, filename)
         self.desc = desc
 
     # no __bytes__() because error message is derived from the standard IOError
 
 
 class LockHeld(LockError):
     def __init__(self, errno, filename, desc, locker):
         LockError.__init__(self, errno, b'Lock held', filename, desc)
         self.locker = locker
 
 
 class LockUnavailable(LockError):
     pass
 
 
 # LockError is for errors while acquiring the lock -- this is unrelated
 class LockInheritanceContractViolation(RuntimeError):
     __bytes__ = _tobytes
 
 
 class ResponseError(Exception):
     """Raised to print an error with part of output and exit."""
 
     __bytes__ = _tobytes
 
 
 # derived from KeyboardInterrupt to simplify some breakout code
 class SignalInterrupt(KeyboardInterrupt):
     """Exception raised on SIGTERM and SIGHUP."""
 
 
 class SignatureError(Exception):
     __bytes__ = _tobytes
 
 
 class PushRaced(RuntimeError):
     """An exception raised during unbundling that indicate a push race"""
 
     __bytes__ = _tobytes
 
 
 class ProgrammingError(Hint, RuntimeError):
     """Raised if a mercurial (core or extension) developer made a mistake"""
 
     def __init__(self, msg, *args, **kwargs):
         # On Python 3, turn the message back into a string since this is
         # an internal-only error that won't be printed except in a
         # stack traces.
         msg = pycompat.sysstr(msg)
         super(ProgrammingError, self).__init__(msg, *args, **kwargs)
 
     __bytes__ = _tobytes
 
 
 class WdirUnsupported(Exception):
     """An exception which is raised when 'wdir()' is not supported"""
 
     __bytes__ = _tobytes
 
 
 # bundle2 related errors
 class BundleValueError(ValueError):
     """error raised when bundle2 cannot be processed"""
 
     __bytes__ = _tobytes
 
 
 class BundleUnknownFeatureError(BundleValueError):
     def __init__(self, parttype=None, params=(), values=()):
         self.parttype = parttype
         self.params = params
         self.values = values
         if self.parttype is None:
             msg = b'Stream Parameter'
         else:
             msg = parttype
         entries = self.params
         if self.params and self.values:
             assert len(self.params) == len(self.values)
             entries = []
             for idx, par in enumerate(self.params):
                 val = self.values[idx]
                 if val is None:
                     entries.append(val)
                 else:
                     entries.append(b"%s=%r" % (par, pycompat.maybebytestr(val)))
         if entries:
             msg = b'%s - %s' % (msg, b', '.join(entries))
         ValueError.__init__(self, msg)
 
 
 class ReadOnlyPartError(RuntimeError):
     """error raised when code tries to alter a part being generated"""
 
     __bytes__ = _tobytes
 
 
 class PushkeyFailed(Abort):
     """error raised when a pushkey part failed to update a value"""
 
     def __init__(
         self, partid, namespace=None, key=None, new=None, old=None, ret=None
     ):
         self.partid = partid
         self.namespace = namespace
         self.key = key
         self.new = new
         self.old = old
         self.ret = ret
         # no i18n expected to be processed into a better message
         Abort.__init__(
             self, b'failed to update value for "%s/%s"' % (namespace, key)
         )
 
 
 class CensoredNodeError(StorageError):
     """error raised when content verification fails on a censored node
 
     Also contains the tombstone data substituted for the uncensored data.
     """
 
     def __init__(self, filename, node, tombstone):
         from .node import short
 
         StorageError.__init__(self, b'%s:%s' % (filename, short(node)))
         self.tombstone = tombstone
 
 
 class CensoredBaseError(StorageError):
     """error raised when a delta is rejected because its base is censored
 
     A delta based on a censored revision must be formed as single patch
     operation which replaces the entire base with new content. This ensures
     the delta may be applied by clones which have not censored the base.
     """
 
 
 class InvalidBundleSpecification(Exception):
     """error raised when a bundle specification is invalid.
 
     This is used for syntax errors as opposed to support errors.
     """
 
     __bytes__ = _tobytes
 
 
 class UnsupportedBundleSpecification(Exception):
     """error raised when a bundle specification is not supported."""
 
     __bytes__ = _tobytes
 
 
 class CorruptedState(Exception):
     """error raised when a command is not able to read its state from file"""
 
     __bytes__ = _tobytes
 
 
 class PeerTransportError(Abort):
     """Transport-level I/O error when communicating with a peer repo."""
 
 
 class InMemoryMergeConflictsError(Exception):
     """Exception raised when merge conflicts arose during an in-memory merge."""
 
     __bytes__ = _tobytes
 
 
 class WireprotoCommandError(Exception):
     """Represents an error during execution of a wire protocol command.
 
     Should only be thrown by wire protocol version 2 commands.
 
     The error is a formatter string and an optional iterable of arguments.
     """
 
     def __init__(self, message, args=None):
         self.message = message
         self.messageargs = args
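
The second hunk, in mercurial/scmutil.py below, adds the matching handler to callcatch(). As an aside, the Hint mix-in shown above is what lets exception classes that list it first in their bases accept an optional hint keyword, which callcatch() echoes in parentheses; an illustrative (made-up) example:

    from mercurial import error

    # Hint.__init__ pops 'hint' before delegating to the next base class,
    # so RepoError and friends take it as a keyword argument.
    raise error.RepoError(b'repository not found', hint=b'check the path')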
@@ -1,2287 +1,2290 b''
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import glob
11 import glob
12 import os
12 import os
13 import posixpath
13 import posixpath
14 import re
14 import re
15 import subprocess
15 import subprocess
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 bin,
20 bin,
21 hex,
21 hex,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 short,
24 short,
25 wdirid,
25 wdirid,
26 wdirrev,
26 wdirrev,
27 )
27 )
28 from .pycompat import getattr
28 from .pycompat import getattr
29 from .thirdparty import attr
29 from .thirdparty import attr
30 from . import (
30 from . import (
31 copies as copiesmod,
31 copies as copiesmod,
32 encoding,
32 encoding,
33 error,
33 error,
34 match as matchmod,
34 match as matchmod,
35 obsolete,
35 obsolete,
36 obsutil,
36 obsutil,
37 pathutil,
37 pathutil,
38 phases,
38 phases,
39 policy,
39 policy,
40 pycompat,
40 pycompat,
41 requirements as requirementsmod,
41 requirements as requirementsmod,
42 revsetlang,
42 revsetlang,
43 similar,
43 similar,
44 smartset,
44 smartset,
45 url,
45 url,
46 util,
46 util,
47 vfs,
47 vfs,
48 )
48 )
49
49
50 from .utils import (
50 from .utils import (
51 hashutil,
51 hashutil,
52 procutil,
52 procutil,
53 stringutil,
53 stringutil,
54 )
54 )
55
55
56 if pycompat.iswindows:
56 if pycompat.iswindows:
57 from . import scmwindows as scmplatform
57 from . import scmwindows as scmplatform
58 else:
58 else:
59 from . import scmposix as scmplatform
59 from . import scmposix as scmplatform
60
60
61 parsers = policy.importmod('parsers')
61 parsers = policy.importmod('parsers')
62 rustrevlog = policy.importrust('revlog')
62 rustrevlog = policy.importrust('revlog')
63
63
64 termsize = scmplatform.termsize
64 termsize = scmplatform.termsize
65
65
66
66
67 @attr.s(slots=True, repr=False)
67 @attr.s(slots=True, repr=False)
68 class status(object):
68 class status(object):
69 '''Struct with a list of files per status.
69 '''Struct with a list of files per status.
70
70
71 The 'deleted', 'unknown' and 'ignored' properties are only
71 The 'deleted', 'unknown' and 'ignored' properties are only
72 relevant to the working copy.
72 relevant to the working copy.
73 '''
73 '''
74
74
75 modified = attr.ib(default=attr.Factory(list))
75 modified = attr.ib(default=attr.Factory(list))
76 added = attr.ib(default=attr.Factory(list))
76 added = attr.ib(default=attr.Factory(list))
77 removed = attr.ib(default=attr.Factory(list))
77 removed = attr.ib(default=attr.Factory(list))
78 deleted = attr.ib(default=attr.Factory(list))
78 deleted = attr.ib(default=attr.Factory(list))
79 unknown = attr.ib(default=attr.Factory(list))
79 unknown = attr.ib(default=attr.Factory(list))
80 ignored = attr.ib(default=attr.Factory(list))
80 ignored = attr.ib(default=attr.Factory(list))
81 clean = attr.ib(default=attr.Factory(list))
81 clean = attr.ib(default=attr.Factory(list))
82
82
83 def __iter__(self):
83 def __iter__(self):
84 yield self.modified
84 yield self.modified
85 yield self.added
85 yield self.added
86 yield self.removed
86 yield self.removed
87 yield self.deleted
87 yield self.deleted
88 yield self.unknown
88 yield self.unknown
89 yield self.ignored
89 yield self.ignored
90 yield self.clean
90 yield self.clean
91
91
92 def __repr__(self):
92 def __repr__(self):
93 return (
93 return (
94 r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
94 r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
95 r'unknown=%s, ignored=%s, clean=%s>'
95 r'unknown=%s, ignored=%s, clean=%s>'
96 ) % tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)
96 ) % tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)
97
97
98
98
99 def itersubrepos(ctx1, ctx2):
99 def itersubrepos(ctx1, ctx2):
100 """find subrepos in ctx1 or ctx2"""
100 """find subrepos in ctx1 or ctx2"""
101 # Create a (subpath, ctx) mapping where we prefer subpaths from
101 # Create a (subpath, ctx) mapping where we prefer subpaths from
102 # ctx1. The subpaths from ctx2 are important when the .hgsub file
102 # ctx1. The subpaths from ctx2 are important when the .hgsub file
103 # has been modified (in ctx2) but not yet committed (in ctx1).
103 # has been modified (in ctx2) but not yet committed (in ctx1).
104 subpaths = dict.fromkeys(ctx2.substate, ctx2)
104 subpaths = dict.fromkeys(ctx2.substate, ctx2)
105 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
105 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
106
106
107 missing = set()
107 missing = set()
108
108
109 for subpath in ctx2.substate:
109 for subpath in ctx2.substate:
110 if subpath not in ctx1.substate:
110 if subpath not in ctx1.substate:
111 del subpaths[subpath]
111 del subpaths[subpath]
112 missing.add(subpath)
112 missing.add(subpath)
113
113
114 for subpath, ctx in sorted(pycompat.iteritems(subpaths)):
114 for subpath, ctx in sorted(pycompat.iteritems(subpaths)):
115 yield subpath, ctx.sub(subpath)
115 yield subpath, ctx.sub(subpath)
116
116
117 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
117 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
118 # status and diff will have an accurate result when it does
118 # status and diff will have an accurate result when it does
119 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
119 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
120 # against itself.
120 # against itself.
121 for subpath in missing:
121 for subpath in missing:
122 yield subpath, ctx2.nullsub(subpath, ctx1)
122 yield subpath, ctx2.nullsub(subpath, ctx1)
123
123
124
124
125 def nochangesfound(ui, repo, excluded=None):
125 def nochangesfound(ui, repo, excluded=None):
126 '''Report no changes for push/pull, excluded is None or a list of
126 '''Report no changes for push/pull, excluded is None or a list of
127 nodes excluded from the push/pull.
127 nodes excluded from the push/pull.
128 '''
128 '''
129 secretlist = []
129 secretlist = []
130 if excluded:
130 if excluded:
131 for n in excluded:
131 for n in excluded:
132 ctx = repo[n]
132 ctx = repo[n]
133 if ctx.phase() >= phases.secret and not ctx.extinct():
133 if ctx.phase() >= phases.secret and not ctx.extinct():
134 secretlist.append(n)
134 secretlist.append(n)
135
135
136 if secretlist:
136 if secretlist:
137 ui.status(
137 ui.status(
138 _(b"no changes found (ignored %d secret changesets)\n")
138 _(b"no changes found (ignored %d secret changesets)\n")
139 % len(secretlist)
139 % len(secretlist)
140 )
140 )
141 else:
141 else:
142 ui.status(_(b"no changes found\n"))
142 ui.status(_(b"no changes found\n"))
143
143
144
144
145 def callcatch(ui, func):
145 def callcatch(ui, func):
146 """call func() with global exception handling
146 """call func() with global exception handling
147
147
148 return func() if no exception happens. otherwise do some error handling
148 return func() if no exception happens. otherwise do some error handling
149 and return an exit code accordingly. does not handle all exceptions.
149 and return an exit code accordingly. does not handle all exceptions.
150 """
150 """
151 try:
151 try:
152 try:
152 try:
153 return func()
153 return func()
154 except: # re-raises
154 except: # re-raises
155 ui.traceback()
155 ui.traceback()
156 raise
156 raise
157 # Global exception handling, alphabetically
157 # Global exception handling, alphabetically
158 # Mercurial-specific first, followed by built-in and library exceptions
158 # Mercurial-specific first, followed by built-in and library exceptions
159 except error.LockHeld as inst:
159 except error.LockHeld as inst:
160 if inst.errno == errno.ETIMEDOUT:
160 if inst.errno == errno.ETIMEDOUT:
161 reason = _(b'timed out waiting for lock held by %r') % (
161 reason = _(b'timed out waiting for lock held by %r') % (
162 pycompat.bytestr(inst.locker)
162 pycompat.bytestr(inst.locker)
163 )
163 )
164 else:
164 else:
165 reason = _(b'lock held by %r') % inst.locker
165 reason = _(b'lock held by %r') % inst.locker
166 ui.error(
166 ui.error(
167 _(b"abort: %s: %s\n")
167 _(b"abort: %s: %s\n")
168 % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
168 % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
169 )
169 )
170 if not inst.locker:
170 if not inst.locker:
171 ui.error(_(b"(lock might be very busy)\n"))
171 ui.error(_(b"(lock might be very busy)\n"))
172 except error.LockUnavailable as inst:
172 except error.LockUnavailable as inst:
173 ui.error(
173 ui.error(
174 _(b"abort: could not lock %s: %s\n")
174 _(b"abort: could not lock %s: %s\n")
175 % (
175 % (
176 inst.desc or stringutil.forcebytestr(inst.filename),
176 inst.desc or stringutil.forcebytestr(inst.filename),
177 encoding.strtolocal(inst.strerror),
177 encoding.strtolocal(inst.strerror),
178 )
178 )
179 )
179 )
180 except error.OutOfBandError as inst:
180 except error.OutOfBandError as inst:
181 if inst.args:
181 if inst.args:
182 msg = _(b"abort: remote error:\n")
182 msg = _(b"abort: remote error:\n")
183 else:
183 else:
184 msg = _(b"abort: remote error\n")
184 msg = _(b"abort: remote error\n")
185 ui.error(msg)
185 ui.error(msg)
186 if inst.args:
186 if inst.args:
187 ui.error(b''.join(inst.args))
187 ui.error(b''.join(inst.args))
188 if inst.hint:
188 if inst.hint:
189 ui.error(b'(%s)\n' % inst.hint)
189 ui.error(b'(%s)\n' % inst.hint)
190 except error.RepoError as inst:
190 except error.RepoError as inst:
191 ui.error(_(b"abort: %s!\n") % inst)
191 ui.error(_(b"abort: %s!\n") % inst)
192 if inst.hint:
192 if inst.hint:
193 ui.error(_(b"(%s)\n") % inst.hint)
193 ui.error(_(b"(%s)\n") % inst.hint)
194 except error.ResponseError as inst:
194 except error.ResponseError as inst:
195 ui.error(_(b"abort: %s") % inst.args[0])
195 ui.error(_(b"abort: %s") % inst.args[0])
196 msg = inst.args[1]
196 msg = inst.args[1]
197 if isinstance(msg, type(u'')):
197 if isinstance(msg, type(u'')):
198 msg = pycompat.sysbytes(msg)
198 msg = pycompat.sysbytes(msg)
199 if not isinstance(msg, bytes):
199 if not isinstance(msg, bytes):
200 ui.error(b" %r\n" % (msg,))
200 ui.error(b" %r\n" % (msg,))
201 elif not msg:
201 elif not msg:
202 ui.error(_(b" empty string\n"))
202 ui.error(_(b" empty string\n"))
203 else:
203 else:
204 ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
204 ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
205 except error.CensoredNodeError as inst:
205 except error.CensoredNodeError as inst:
206 ui.error(_(b"abort: file censored %s!\n") % inst)
206 ui.error(_(b"abort: file censored %s!\n") % inst)
207 except error.StorageError as inst:
207 except error.StorageError as inst:
208 ui.error(_(b"abort: %s!\n") % inst)
208 ui.error(_(b"abort: %s!\n") % inst)
209 if inst.hint:
209 if inst.hint:
210 ui.error(_(b"(%s)\n") % inst.hint)
210 ui.error(_(b"(%s)\n") % inst.hint)
211 except error.InterventionRequired as inst:
211 except error.InterventionRequired as inst:
212 ui.error(b"%s\n" % inst)
212 ui.error(b"%s\n" % inst)
213 if inst.hint:
213 if inst.hint:
214 ui.error(_(b"(%s)\n") % inst.hint)
214 ui.error(_(b"(%s)\n") % inst.hint)
215 return 1
215 return 1
216 except error.WdirUnsupported:
216 except error.WdirUnsupported:
217 ui.error(_(b"abort: working directory revision cannot be specified\n"))
217 ui.error(_(b"abort: working directory revision cannot be specified\n"))
218 except error.Abort as inst:
218 except error.Abort as inst:
219 ui.error(_(b"abort: %s\n") % inst.message)
219 ui.error(_(b"abort: %s\n") % inst.message)
220 if inst.hint:
220 if inst.hint:
221 ui.error(_(b"(%s)\n") % inst.hint)
221 ui.error(_(b"(%s)\n") % inst.hint)
222 except error.WorkerError as inst:
223 # Don't print a message -- the worker already should have
224 return inst.status_code
222 except ImportError as inst:
225 except ImportError as inst:
223 ui.error(_(b"abort: %s!\n") % stringutil.forcebytestr(inst))
226 ui.error(_(b"abort: %s!\n") % stringutil.forcebytestr(inst))
224 m = stringutil.forcebytestr(inst).split()[-1]
227 m = stringutil.forcebytestr(inst).split()[-1]
225 if m in b"mpatch bdiff".split():
228 if m in b"mpatch bdiff".split():
226 ui.error(_(b"(did you forget to compile extensions?)\n"))
229 ui.error(_(b"(did you forget to compile extensions?)\n"))
227 elif m in b"zlib".split():
230 elif m in b"zlib".split():
228 ui.error(_(b"(is your Python install correct?)\n"))
231 ui.error(_(b"(is your Python install correct?)\n"))
229 except (IOError, OSError) as inst:
232 except (IOError, OSError) as inst:
230 if util.safehasattr(inst, b"code"): # HTTPError
233 if util.safehasattr(inst, b"code"): # HTTPError
231 ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
234 ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
232 elif util.safehasattr(inst, b"reason"): # URLError or SSLError
235 elif util.safehasattr(inst, b"reason"): # URLError or SSLError
233 try: # usually it is in the form (errno, strerror)
236 try: # usually it is in the form (errno, strerror)
234 reason = inst.reason.args[1]
237 reason = inst.reason.args[1]
235 except (AttributeError, IndexError):
238 except (AttributeError, IndexError):
236 # it might be anything, for example a string
239 # it might be anything, for example a string
237 reason = inst.reason
240 reason = inst.reason
238 if isinstance(reason, pycompat.unicode):
241 if isinstance(reason, pycompat.unicode):
239 # SSLError of Python 2.7.9 contains a unicode
242 # SSLError of Python 2.7.9 contains a unicode
240 reason = encoding.unitolocal(reason)
243 reason = encoding.unitolocal(reason)
241 ui.error(_(b"abort: error: %s\n") % stringutil.forcebytestr(reason))
244 ui.error(_(b"abort: error: %s\n") % stringutil.forcebytestr(reason))
242 elif (
245 elif (
243 util.safehasattr(inst, b"args")
246 util.safehasattr(inst, b"args")
244 and inst.args
247 and inst.args
245 and inst.args[0] == errno.EPIPE
248 and inst.args[0] == errno.EPIPE
246 ):
249 ):
247 pass
250 pass
248 elif getattr(inst, "strerror", None): # common IOError or OSError
251 elif getattr(inst, "strerror", None): # common IOError or OSError
249 if getattr(inst, "filename", None) is not None:
252 if getattr(inst, "filename", None) is not None:
250 ui.error(
253 ui.error(
251 _(b"abort: %s: '%s'\n")
254 _(b"abort: %s: '%s'\n")
252 % (
255 % (
253 encoding.strtolocal(inst.strerror),
256 encoding.strtolocal(inst.strerror),
254 stringutil.forcebytestr(inst.filename),
257 stringutil.forcebytestr(inst.filename),
255 )
258 )
256 )
259 )
257 else:
260 else:
258 ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
261 ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
259 else: # suspicious IOError
262 else: # suspicious IOError
260 raise
263 raise
261 except MemoryError:
264 except MemoryError:
262 ui.error(_(b"abort: out of memory\n"))
265 ui.error(_(b"abort: out of memory\n"))
263 except SystemExit as inst:
266 except SystemExit as inst:
264 # Commands shouldn't sys.exit directly, but give a return code.
267 # Commands shouldn't sys.exit directly, but give a return code.
265 # Just in case catch this and and pass exit code to caller.
268 # Just in case catch this and and pass exit code to caller.
266 return inst.code
269 return inst.code
267
270
268 return -1
271 return -1
269
272
270
273
271 def checknewlabel(repo, lbl, kind):
274 def checknewlabel(repo, lbl, kind):
272 # Do not use the "kind" parameter in ui output.
275 # Do not use the "kind" parameter in ui output.
273 # It makes strings difficult to translate.
276 # It makes strings difficult to translate.
274 if lbl in [b'tip', b'.', b'null']:
277 if lbl in [b'tip', b'.', b'null']:
275 raise error.Abort(_(b"the name '%s' is reserved") % lbl)
278 raise error.Abort(_(b"the name '%s' is reserved") % lbl)
276 for c in (b':', b'\0', b'\n', b'\r'):
279 for c in (b':', b'\0', b'\n', b'\r'):
277 if c in lbl:
280 if c in lbl:
278 raise error.Abort(
281 raise error.Abort(
279 _(b"%r cannot be used in a name") % pycompat.bytestr(c)
282 _(b"%r cannot be used in a name") % pycompat.bytestr(c)
280 )
283 )
281 try:
284 try:
282 int(lbl)
285 int(lbl)
283 raise error.Abort(_(b"cannot use an integer as a name"))
286 raise error.Abort(_(b"cannot use an integer as a name"))
284 except ValueError:
287 except ValueError:
285 pass
288 pass
286 if lbl.strip() != lbl:
289 if lbl.strip() != lbl:
287 raise error.Abort(_(b"leading or trailing whitespace in name %r") % lbl)
290 raise error.Abort(_(b"leading or trailing whitespace in name %r") % lbl)
288
291
289
292
290 def checkfilename(f):
293 def checkfilename(f):
291 '''Check that the filename f is an acceptable filename for a tracked file'''
294 '''Check that the filename f is an acceptable filename for a tracked file'''
292 if b'\r' in f or b'\n' in f:
295 if b'\r' in f or b'\n' in f:
293 raise error.Abort(
296 raise error.Abort(
294 _(b"'\\n' and '\\r' disallowed in filenames: %r")
297 _(b"'\\n' and '\\r' disallowed in filenames: %r")
295 % pycompat.bytestr(f)
298 % pycompat.bytestr(f)
296 )
299 )
297
300
298
301
299 def checkportable(ui, f):
302 def checkportable(ui, f):
300 '''Check if filename f is portable and warn or abort depending on config'''
303 '''Check if filename f is portable and warn or abort depending on config'''
301 checkfilename(f)
304 checkfilename(f)
302 abort, warn = checkportabilityalert(ui)
305 abort, warn = checkportabilityalert(ui)
303 if abort or warn:
306 if abort or warn:
304 msg = util.checkwinfilename(f)
307 msg = util.checkwinfilename(f)
305 if msg:
308 if msg:
306 msg = b"%s: %s" % (msg, procutil.shellquote(f))
309 msg = b"%s: %s" % (msg, procutil.shellquote(f))
307 if abort:
310 if abort:
308 raise error.Abort(msg)
311 raise error.Abort(msg)
309 ui.warn(_(b"warning: %s\n") % msg)
312 ui.warn(_(b"warning: %s\n") % msg)
310
313
311
314
312 def checkportabilityalert(ui):
315 def checkportabilityalert(ui):
313 '''check if the user's config requests nothing, a warning, or abort for
316 '''check if the user's config requests nothing, a warning, or abort for
314 non-portable filenames'''
317 non-portable filenames'''
315 val = ui.config(b'ui', b'portablefilenames')
318 val = ui.config(b'ui', b'portablefilenames')
316 lval = val.lower()
319 lval = val.lower()
317 bval = stringutil.parsebool(val)
320 bval = stringutil.parsebool(val)
318 abort = pycompat.iswindows or lval == b'abort'
321 abort = pycompat.iswindows or lval == b'abort'
319 warn = bval or lval == b'warn'
322 warn = bval or lval == b'warn'
320 if bval is None and not (warn or abort or lval == b'ignore'):
323 if bval is None and not (warn or abort or lval == b'ignore'):
321 raise error.ConfigError(
324 raise error.ConfigError(
322 _(b"ui.portablefilenames value is invalid ('%s')") % val
325 _(b"ui.portablefilenames value is invalid ('%s')") % val
323 )
326 )
324 return abort, warn
327 return abort, warn
325
328
326
329
327 class casecollisionauditor(object):
330 class casecollisionauditor(object):
328 def __init__(self, ui, abort, dirstate):
331 def __init__(self, ui, abort, dirstate):
329 self._ui = ui
332 self._ui = ui
330 self._abort = abort
333 self._abort = abort
331 allfiles = b'\0'.join(dirstate)
334 allfiles = b'\0'.join(dirstate)
332 self._loweredfiles = set(encoding.lower(allfiles).split(b'\0'))
335 self._loweredfiles = set(encoding.lower(allfiles).split(b'\0'))
333 self._dirstate = dirstate
336 self._dirstate = dirstate
334 # The purpose of _newfiles is so that we don't complain about
337 # The purpose of _newfiles is so that we don't complain about
335 # case collisions if someone were to call this object with the
338 # case collisions if someone were to call this object with the
336 # same filename twice.
339 # same filename twice.
337 self._newfiles = set()
340 self._newfiles = set()
338
341
339 def __call__(self, f):
342 def __call__(self, f):
340 if f in self._newfiles:
343 if f in self._newfiles:
341 return
344 return
342 fl = encoding.lower(f)
345 fl = encoding.lower(f)
343 if fl in self._loweredfiles and f not in self._dirstate:
346 if fl in self._loweredfiles and f not in self._dirstate:
344 msg = _(b'possible case-folding collision for %s') % f
347 msg = _(b'possible case-folding collision for %s') % f
345 if self._abort:
348 if self._abort:
346 raise error.Abort(msg)
349 raise error.Abort(msg)
347 self._ui.warn(_(b"warning: %s\n") % msg)
350 self._ui.warn(_(b"warning: %s\n") % msg)
348 self._loweredfiles.add(fl)
351 self._loweredfiles.add(fl)
349 self._newfiles.add(f)
352 self._newfiles.add(f)
350
353
351
354
352 def filteredhash(repo, maxrev):
355 def filteredhash(repo, maxrev):
353 """build hash of filtered revisions in the current repoview.
356 """build hash of filtered revisions in the current repoview.
354
357
355 Multiple caches perform up-to-date validation by checking that the
358 Multiple caches perform up-to-date validation by checking that the
356 tiprev and tipnode stored in the cache file match the current repository.
359 tiprev and tipnode stored in the cache file match the current repository.
357 However, this is not sufficient for validating repoviews because the set
360 However, this is not sufficient for validating repoviews because the set
358 of revisions in the view may change without the repository tiprev and
361 of revisions in the view may change without the repository tiprev and
359 tipnode changing.
362 tipnode changing.
360
363
361 This function hashes all the revs filtered from the view and returns
364 This function hashes all the revs filtered from the view and returns
362 that SHA-1 digest.
365 that SHA-1 digest.
363 """
366 """
364 cl = repo.changelog
367 cl = repo.changelog
365 if not cl.filteredrevs:
368 if not cl.filteredrevs:
366 return None
369 return None
367 key = cl._filteredrevs_hashcache.get(maxrev)
370 key = cl._filteredrevs_hashcache.get(maxrev)
368 if not key:
371 if not key:
369 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
372 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
370 if revs:
373 if revs:
371 s = hashutil.sha1()
374 s = hashutil.sha1()
372 for rev in revs:
375 for rev in revs:
373 s.update(b'%d;' % rev)
376 s.update(b'%d;' % rev)
374 key = s.digest()
377 key = s.digest()
375 cl._filteredrevs_hashcache[maxrev] = key
378 cl._filteredrevs_hashcache[maxrev] = key
376 return key
379 return key
377
380
378
381
379 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
382 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
380 '''yield every hg repository under path, always recursively.
383 '''yield every hg repository under path, always recursively.
381 The recurse flag will only control recursion into repo working dirs'''
384 The recurse flag will only control recursion into repo working dirs'''
382
385
383 def errhandler(err):
386 def errhandler(err):
384 if err.filename == path:
387 if err.filename == path:
385 raise err
388 raise err
386
389
387 samestat = getattr(os.path, 'samestat', None)
390 samestat = getattr(os.path, 'samestat', None)
388 if followsym and samestat is not None:
391 if followsym and samestat is not None:
389
392
390 def adddir(dirlst, dirname):
393 def adddir(dirlst, dirname):
391 dirstat = os.stat(dirname)
394 dirstat = os.stat(dirname)
392 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
395 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
393 if not match:
396 if not match:
394 dirlst.append(dirstat)
397 dirlst.append(dirstat)
395 return not match
398 return not match
396
399
397 else:
400 else:
398 followsym = False
401 followsym = False
399
402
400 if (seen_dirs is None) and followsym:
403 if (seen_dirs is None) and followsym:
401 seen_dirs = []
404 seen_dirs = []
402 adddir(seen_dirs, path)
405 adddir(seen_dirs, path)
403 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
406 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
404 dirs.sort()
407 dirs.sort()
405 if b'.hg' in dirs:
408 if b'.hg' in dirs:
406 yield root # found a repository
409 yield root # found a repository
407 qroot = os.path.join(root, b'.hg', b'patches')
410 qroot = os.path.join(root, b'.hg', b'patches')
408 if os.path.isdir(os.path.join(qroot, b'.hg')):
411 if os.path.isdir(os.path.join(qroot, b'.hg')):
409 yield qroot # we have a patch queue repo here
412 yield qroot # we have a patch queue repo here
410 if recurse:
413 if recurse:
411 # avoid recursing inside the .hg directory
414 # avoid recursing inside the .hg directory
412 dirs.remove(b'.hg')
415 dirs.remove(b'.hg')
413 else:
416 else:
414 dirs[:] = [] # don't descend further
417 dirs[:] = [] # don't descend further
415 elif followsym:
418 elif followsym:
416 newdirs = []
419 newdirs = []
417 for d in dirs:
420 for d in dirs:
418 fname = os.path.join(root, d)
421 fname = os.path.join(root, d)
419 if adddir(seen_dirs, fname):
422 if adddir(seen_dirs, fname):
420 if os.path.islink(fname):
423 if os.path.islink(fname):
421 for hgname in walkrepos(fname, True, seen_dirs):
424 for hgname in walkrepos(fname, True, seen_dirs):
422 yield hgname
425 yield hgname
423 else:
426 else:
424 newdirs.append(d)
427 newdirs.append(d)
425 dirs[:] = newdirs
428 dirs[:] = newdirs
426
429
427
430
428 def binnode(ctx):
431 def binnode(ctx):
429 """Return binary node id for a given basectx"""
432 """Return binary node id for a given basectx"""
430 node = ctx.node()
433 node = ctx.node()
431 if node is None:
434 if node is None:
432 return wdirid
435 return wdirid
433 return node
436 return node
434
437
435
438
436 def intrev(ctx):
439 def intrev(ctx):
437 """Return integer for a given basectx that can be used in comparison or
440 """Return integer for a given basectx that can be used in comparison or
438 arithmetic operation"""
441 arithmetic operation"""
439 rev = ctx.rev()
442 rev = ctx.rev()
440 if rev is None:
443 if rev is None:
441 return wdirrev
444 return wdirrev
442 return rev
445 return rev
443
446
444
447
445 def formatchangeid(ctx):
448 def formatchangeid(ctx):
446 """Format changectx as '{rev}:{node|formatnode}', which is the default
449 """Format changectx as '{rev}:{node|formatnode}', which is the default
447 template provided by logcmdutil.changesettemplater"""
450 template provided by logcmdutil.changesettemplater"""
448 repo = ctx.repo()
451 repo = ctx.repo()
449 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
452 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
450
453
451
454
452 def formatrevnode(ui, rev, node):
455 def formatrevnode(ui, rev, node):
453 """Format given revision and node depending on the current verbosity"""
456 """Format given revision and node depending on the current verbosity"""
454 if ui.debugflag:
457 if ui.debugflag:
455 hexfunc = hex
458 hexfunc = hex
456 else:
459 else:
457 hexfunc = short
460 hexfunc = short
458 return b'%d:%s' % (rev, hexfunc(node))
461 return b'%d:%s' % (rev, hexfunc(node))
459
462
460
463
def resolvehexnodeidprefix(repo, prefix):
    if prefix.startswith(b'x'):
        prefix = prefix[1:]
    try:
        # Uses unfiltered repo because it's faster when prefix is ambiguous.
        # This matches the shortesthexnodeidprefix() function below.
        node = repo.unfiltered().changelog._partialmatch(prefix)
    except error.AmbiguousPrefixLookupError:
        revset = repo.ui.config(
            b'experimental', b'revisions.disambiguatewithin'
        )
        if revset:
            # Clear config to avoid infinite recursion
            configoverrides = {
                (b'experimental', b'revisions.disambiguatewithin'): None
            }
            with repo.ui.configoverride(configoverrides):
                revs = repo.anyrevs([revset], user=True)
                matches = []
                for rev in revs:
                    node = repo.changelog.node(rev)
                    if hex(node).startswith(prefix):
                        matches.append(node)
                if len(matches) == 1:
                    return matches[0]
        raise
    if node is None:
        return
    repo.changelog.rev(node)  # make sure node isn't filtered
    return node


def mayberevnum(repo, prefix):
    """Checks if the given prefix may be mistaken for a revision number"""
    try:
        i = int(prefix)
        # if we are a pure int, then starting with zero will not be
        # confused as a rev; or, obviously, if the int is larger
        # than the value of the tip rev. We still need to disambiguate if
        # prefix == '0', since that *is* a valid revnum.
        if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
            return False
        return True
    except ValueError:
        return False


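# A hedged sketch of mayberevnum()'s behavior, assuming a repo with 100
# revisions (the values follow directly from the code above):
#
#     mayberevnum(repo, b'42')    # True: plausible revision number
#     mayberevnum(repo, b'042')   # False: a leading zero is never a revnum
#     mayberevnum(repo, b'1234')  # False: larger than the tip rev
#     mayberevnum(repo, b'abc')   # False: not an integer at all

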
def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
    """Find the shortest unambiguous prefix that matches hexnode.

    If "cache" is not None, it must be a dictionary that can be used for
    caching between calls to this method.
    """
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. So we look for hash collisions in
    # unfiltered space, which means some hashes may be slightly longer.

    minlength = max(minlength, 1)

    def disambiguate(prefix):
        """Disambiguate against revnums."""
        if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'):
            if mayberevnum(repo, prefix):
                return b'x' + prefix
            else:
                return prefix

        hexnode = hex(node)
        for length in range(len(prefix), len(hexnode) + 1):
            prefix = hexnode[:length]
            if not mayberevnum(repo, prefix):
                return prefix

    cl = repo.unfiltered().changelog
    revset = repo.ui.config(b'experimental', b'revisions.disambiguatewithin')
    if revset:
        revs = None
        if cache is not None:
            revs = cache.get(b'disambiguationrevset')
        if revs is None:
            revs = repo.anyrevs([revset], user=True)
            if cache is not None:
                cache[b'disambiguationrevset'] = revs
        if cl.rev(node) in revs:
            hexnode = hex(node)
            nodetree = None
            if cache is not None:
                nodetree = cache.get(b'disambiguationnodetree')
            if not nodetree:
                if util.safehasattr(parsers, 'nodetree'):
                    # The CExt is the only implementation to provide a nodetree
                    # class so far.
                    index = cl.index
                    if util.safehasattr(index, 'get_cindex'):
                        # the rust wrapper needs to give access to its
                        # internal index
                        index = index.get_cindex()
                    nodetree = parsers.nodetree(index, len(revs))
                    for r in revs:
                        nodetree.insert(r)
                    if cache is not None:
                        cache[b'disambiguationnodetree'] = nodetree
            if nodetree is not None:
                length = max(nodetree.shortest(node), minlength)
                prefix = hexnode[:length]
                return disambiguate(prefix)
            for length in range(minlength, len(hexnode) + 1):
                matches = []
                prefix = hexnode[:length]
                for rev in revs:
                    otherhexnode = repo[rev].hex()
                    if prefix == otherhexnode[:length]:
                        matches.append(otherhexnode)
                if len(matches) == 1:
                    return disambiguate(prefix)

    try:
        return disambiguate(cl.shortest(node, minlength))
    except error.LookupError:
        raise error.RepoLookupError()


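# Illustrative note (not upstream code): with
# experimental.revisions.prefixhexnode enabled, a prefix that could also be
# read as a revision number comes back with an 'x' prepended, which
# resolvehexnodeidprefix() above strips off again:
#
#     shortesthexnodeidprefix(repo, node)     # -> e.g. b'x123f' (hypothetical)
#     resolvehexnodeidprefix(repo, b'x123f')  # looks up the prefix b'123f'

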
def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
    symbol is an ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
        return True
    except error.RepoLookupError:
        return False


def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        msg = (
            b"symbol (%s of type %s) was not a string, did you mean "
            b"repo[symbol]?" % (symbol, type(symbol))
        )
        raise error.ProgrammingError(msg)
    try:
        if symbol in (b'.', b'tip', b'null'):
            return repo[symbol]

        try:
            r = int(symbol)
            if b'%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        if len(symbol) == 40:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_(b"unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        return repo[None]
    except (
        error.FilteredIndexError,
        error.FilteredLookupError,
        error.FilteredRepoLookupError,
    ):
        raise _filterederror(repo, symbol)


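# Hedged usage sketch (assumes `repo` is an open localrepo; the bookmark
# name is an example):
#
#     ctx = revsymbol(repo, b'tip')       # symbolic name
#     ctx = revsymbol(repo, b'42')        # revision number
#     ctx = revsymbol(repo, b'deadbeef')  # nodeid prefix
#     if isrevsymbol(repo, b'my-bookmark'):
#         ctx = revsymbol(repo, b'my-bookmark')

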
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted into a function to help extensions (e.g. evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith(b'visible'):

        # Check if the changeset is obsolete
        unfilteredrepo = repo.unfiltered()
        ctx = revsymbol(unfilteredrepo, changeid)

        # If the changeset is obsolete, enrich the message with the reason
        # that made this changeset not visible
        if ctx.obsolete():
            msg = obsutil._getfilteredreason(repo, changeid, ctx)
        else:
            msg = _(b"hidden revision '%s'") % changeid

        hint = _(b'use --hidden to access hidden revisions')

        return error.FilteredRepoLookupError(msg, hint=hint)
    msg = _(b"filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)


def revsingle(repo, revspec, default=b'.', localalias=None):
    if not revspec and revspec != 0:
        return repo[default]

    l = revrange(repo, [revspec], localalias=localalias)
    if not l:
        raise error.Abort(_(b'empty revision set'))
    return repo[l.last()]


def _pairspec(revspec):
    tree = revsetlang.parse(revspec)
    return tree and tree[0] in (
        b'range',
        b'rangepre',
        b'rangepost',
        b'rangeall',
    )


def revpair(repo, revs):
    if not revs:
        return repo[b'.'], repo[None]

    l = revrange(repo, revs)

    if not l:
        raise error.Abort(_(b'empty revision range'))

    first = l.first()
    second = l.last()

    if (
        first == second
        and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)
    ):
        raise error.Abort(_(b'empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]


def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``smartset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    allspecs = []
    for spec in specs:
        if isinstance(spec, int):
            spec = revsetlang.formatspec(b'%d', spec)
        allspecs.append(spec)
    return repo.anyrevs(allspecs, user=True, localalias=localalias)


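# Hedged usage sketch (the revset string is an example):
#
#     revs = revrange(repo, [b'draft()', 42])  # ints are taken as revnums
#     if revs:
#         latest = repo[revs.last()]  # smartsets expose first()/last()

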
def increasingwindows(windowsize=8, sizelimit=512):
    while True:
        yield windowsize
        if windowsize < sizelimit:
            windowsize *= 2


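# Illustrative only (not in the upstream module): the generator doubles the
# window until it reaches the size limit, then stays there:
#
#     import itertools
#     list(itertools.islice(increasingwindows(), 8))
#     # -> [8, 16, 32, 64, 128, 256, 512, 512]

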
def walkchangerevs(repo, revs, makefilematcher, prepare):
    '''Iterate over files and the revs in a "windowed" way.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''

    if not revs:
        return []
    change = repo.__getitem__

    def iterate():
        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            for i in pycompat.xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                nrevs.append(rev)
            for rev in sorted(nrevs):
                ctx = change(rev)
                prepare(ctx, makefilematcher(ctx))
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()


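# A hedged sketch of the calling convention (simplified; the real callers
# live in logcmdutil): makefilematcher maps a ctx to a matcher, and prepare
# is invoked on each ctx of a window in forward order before the window is
# yielded in the order of `revs`:
#
#     def prepare(ctx, match):
#         pass  # e.g. prime per-window caches
#
#     for ctx in walkchangerevs(repo, revs, lambda ctx: matchall(repo), prepare):
#         repo.ui.write(b'%d\n' % ctx.rev())

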
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        return [parents[0], repo[nullrev]]
    if parents[0].rev() >= intrev(ctx) - 1:
        return []
    return parents


def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
    """Return a function that produces paths for presenting to the user.

    The returned function takes a repo-relative path and produces a path
    that can be presented in the UI.

    Depending on the value of ui.relative-paths, either a repo-relative or
    cwd-relative path will be produced.

    legacyrelativevalue is the value to use if ui.relative-paths=legacy

    If forcerelativevalue is not None, then that value will be used regardless
    of what ui.relative-paths is set to.
    """
    if forcerelativevalue is not None:
        relative = forcerelativevalue
    else:
        config = repo.ui.config(b'ui', b'relative-paths')
        if config == b'legacy':
            relative = legacyrelativevalue
        else:
            relative = stringutil.parsebool(config)
            if relative is None:
                raise error.ConfigError(
                    _(b"ui.relative-paths is not a boolean ('%s')") % config
                )

    if relative:
        cwd = repo.getcwd()
        if cwd != b'':
            # this branch would work even if cwd == b'' (ie cwd = repo
            # root), but its generality makes the returned function slower
            pathto = repo.pathto
            return lambda f: pathto(f, cwd)
    if repo.ui.configbool(b'ui', b'slash'):
        return lambda f: f
    else:
        return util.localpath


def subdiruipathfn(subpath, uipathfn):
    '''Create a new uipathfn that treats the file as relative to subpath.'''
    return lambda f: uipathfn(posixpath.join(subpath, f))


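# Hedged usage sketch: the returned functions map repo-relative paths to
# whatever the user should see (file names here are examples):
#
#     uipathfn = getuipathfn(repo, legacyrelativevalue=True)
#     repo.ui.status(b'adding %s\n' % uipathfn(b'dir/file.txt'))
#
#     subfn = subdiruipathfn(b'subrepo', uipathfn)
#     subfn(b'file.txt')  # joins to b'subrepo/file.txt', then applies uipathfn

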
def anypats(pats, opts):
    '''Checks if any patterns, including --include and --exclude, were given.

    Some commands (e.g. addremove) use this condition for deciding whether to
    print absolute or relative paths.
    '''
    return bool(pats or opts.get(b'include') or opts.get(b'exclude'))


def expandpats(pats):
    '''Expand bare globs when running on Windows.
    On POSIX we assume it has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            try:
                globbed = glob.glob(pat)
            except re.error:
                globbed = [pat]
            if globbed:
                ret.extend(globbed)
                continue
        ret.append(kindpat)
    return ret


def matchandpats(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if opts is None:
        opts = {}
    if not globbed and default == b'relpath':
        pats = expandpats(pats or [])

    uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)

    def bad(f, msg):
        ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(
        pats,
        opts.get(b'include'),
        opts.get(b'exclude'),
        default,
        listsubrepos=opts.get(b'subrepos'),
        badfn=badfn,
    )

    if m.always():
        pats = []
    return m, pats


def match(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    '''Return a matcher that will warn about bad matches.'''
    return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]


def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always()


def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(files, badfn=badfn)


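# Hedged usage sketch (the pattern is an example): matchers are callables
# that take a repo-relative file name and return whether it matches:
#
#     m = match(repo[None], pats=[b'glob:*.py'])
#     m(b'setup.py')  # True for a top-level .py file when run from the root

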
def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.
    """
    if not matchmod.patkind(pat):
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    else:
        ctx = repo[rev]
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
        files = [f for f in ctx if m(f)]
        if len(files) != 1:
            raise error.ParseError(msg)
        return files[0]


def getorigvfs(ui, repo):
    """return a vfs suitable to save 'orig' file

    return None if no special directory is configured"""
    origbackuppath = ui.config(b'ui', b'origbackuppath')
    if not origbackuppath:
        return None
    return vfs.vfs(repo.wvfs.join(origbackuppath))


def backuppath(ui, repo, filepath):
    '''customize where working copy backup files (.orig files) are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified

    filepath is repo-relative

    Returns an absolute path
    '''
    origvfs = getorigvfs(ui, repo)
    if origvfs is None:
        return repo.wjoin(filepath + b".orig")

    origbackupdir = origvfs.dirname(filepath)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        for f in reversed(list(pathutil.finddirs(filepath))):
            if origvfs.isfileorlink(f):
                ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    if origvfs.isdir(filepath) and not origvfs.islink(filepath):
        ui.note(
            _(b'removing conflicting directory: %s\n') % origvfs.join(filepath)
        )
        origvfs.rmtree(filepath, forcibly=True)

    return origvfs.join(filepath)


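# Hedged example of the default behavior (paths are illustrative): with no
# [ui] origbackuppath configured, getorigvfs() returns None and the backup
# lands next to the file itself:
#
#     backuppath(ui, repo, b'dir/file.txt')
#     # -> <repo root>/dir/file.txt.orig

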
class _containsnode(object):
    """proxy __contains__(node) to container.__contains__ which accepts revs"""

    def __init__(self, repo, revcontainer):
        self._torev = repo.changelog.rev
        self._revcontains = revcontainer.__contains__

    def __contains__(self, node):
        return self._revcontains(self._torev(node))


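# Illustrative sketch: wrap a rev container so that membership can be tested
# by node instead of by revision number (the revset is an example):
#
#     draftrevs = repo.revs(b'draft()')
#     draftnodes = _containsnode(repo, draftrevs)
#     repo[b'tip'].node() in draftnodes  # True iff tip is a draft changeset

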
def cleanupnodes(
    repo,
    replacements,
    operation,
    moves=None,
    metadata=None,
    fixphase=False,
    targetphase=None,
    backup=True,
):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is a dictionary containing metadata to be stored in obsmarkers if
    obsolescence is enabled.
    """
    assert fixphase or targetphase is None
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, b'items'):
        replacements = {(n,): () for n in replacements}
    else:
        # upgrading non tuple "source" to tuple ones for BC
        repls = {}
        for key, value in replacements.items():
            if not isinstance(key, tuple):
                key = (key,)
            repls[key] = value
        replacements = repls

    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    for oldnodes, newnodes in replacements.items():
        for oldnode in oldnodes:
            if oldnode in moves:
                continue
            if len(newnodes) > 1:
                # usually a split, take the one with biggest rev number
                newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
            elif len(newnodes) == 0:
                # move bookmark backwards
                allreplaced = []
                for rep in replacements:
                    allreplaced.extend(rep)
                roots = list(
                    unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
                )
                if roots:
                    newnode = roots[0].node()
                else:
                    newnode = nullid
            else:
                newnode = newnodes[0]
            moves[oldnode] = newnode

    allnewnodes = [n for ns in replacements.values() for n in ns]
    toretract = {}
    toadvance = {}
    if fixphase:
        precursors = {}
        for oldnodes, newnodes in replacements.items():
            for oldnode in oldnodes:
                for newnode in newnodes:
                    precursors.setdefault(newnode, []).append(oldnode)

        allnewnodes.sort(key=lambda n: unfi[n].rev())
        newphases = {}

        def phase(ctx):
            return newphases.get(ctx.node(), ctx.phase())

        for newnode in allnewnodes:
            ctx = unfi[newnode]
            parentphase = max(phase(p) for p in ctx.parents())
            if targetphase is None:
                oldphase = max(
                    unfi[oldnode].phase() for oldnode in precursors[newnode]
                )
                newphase = max(oldphase, parentphase)
            else:
                newphase = max(targetphase, parentphase)
            newphases[newnode] = newphase
            if newphase > ctx.phase():
                toretract.setdefault(newphase, []).append(newnode)
            elif newphase < ctx.phase():
                toadvance.setdefault(newphase, []).append(newnode)

    with repo.transaction(b'cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks  # avoid import cycle

            repo.ui.debug(
                b'moving bookmarks %r from %s to %s\n'
                % (
                    pycompat.rapply(pycompat.maybebytestr, oldbmarks),
                    hex(oldnode),
                    hex(newnode),
                )
            )
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs(
                b'parents(roots(%ln & (::%n))) - parents(%n)',
                allnewnodes,
                newnode,
                oldnode,
            )
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        for phase, nodes in toretract.items():
            phases.retractboundary(repo, tr, phase, nodes)
        for phase, nodes in toadvance.items():
            phases.advanceboundary(repo, tr, phase, nodes)

        mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obsolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the nodes in topological order, which might be useful
            # for some obsstore logic.
            # NOTE: the sorting might belong to createmarkers.
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0][0])
            rels = []
            for ns, s in sorted(replacements.items(), key=sortfunc):
                rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
                rels.append(rel)
            if rels:
                obsolete.createmarkers(
                    repo, rels, operation=operation, metadata=metadata
                )
        elif phases.supportinternal(repo) and mayusearchived:
            # this assumes we do not have "unstable" nodes above the cleaned ones
            allreplaced = set()
            for ns in replacements.keys():
                allreplaced.update(ns)
            if backup:
                from . import repair  # avoid import cycle

                node = min(allreplaced, key=repo.changelog.rev)
                repair.backupbundle(
                    repo, allreplaced, allreplaced, node, operation
                )
            phases.retractboundary(repo, tr, phases.archived, allreplaced)
        else:
            from . import repair  # avoid import cycle

            tostrip = list(n for ns in replacements for n in ns)
            if tostrip:
                repair.delayedstrip(
                    repo.ui, repo, tostrip, operation, backup=backup
                )


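# Hedged usage sketch (node values are placeholders): a history-rewriting
# command that replaced `oldnode` with `newnode` would typically finish with
# something like:
#
#     cleanupnodes(repo, {oldnode: [newnode]}, b'amend', fixphase=True)
#
# which writes obsmarkers (or strips the old nodes), moves bookmarks over,
# and adjusts phases as described in the docstring above.

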
def addremove(repo, matcher, prefix, uipathfn, opts=None):
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get(b'dry_run')
    try:
        similarity = float(opts.get(b'similarity') or 0)
    except ValueError:
        raise error.Abort(_(b'similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.Abort(_(b'similarity must be between 0 and 100'))
    similarity /= 100.0

    ret = 0

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = subdiruipathfn(subpath, uipathfn)
            try:
                if sub.addremove(submatch, subprefix, subuipathfn, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )

    rejected = []

    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(
        repo, badmatch
    )

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _(b'adding %s\n') % uipathfn(abs)
                label = b'ui.addremove.added'
            else:
                status = _(b'removing %s\n') % uipathfn(abs)
                label = b'ui.addremove.removed'
            repo.ui.status(status, label=label)

    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret


def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _(b'adding %s\n') % abs
            else:
                status = _(b'removing %s\n') % abs
            repo.ui.status(status)

    # TODO: We should probably have the caller pass in uipathfn and apply it to
    # the messages above too. legacyrelativevalue=True is consistent with how
    # it used to work.
    uipathfn = getuipathfn(repo, legacyrelativevalue=True)
    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0


def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    matcher = repo.narrowmatch(matcher, includeexact=True)
    walkresults = dirstate.walk(
        matcher,
        subrepos=sorted(ctx.substate),
        unknown=True,
        ignored=False,
        full=False,
    )
    for abs, st in pycompat.iteritems(walkresults):
        dstate = dirstate[abs]
        if dstate == b'?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != b'r' and not st:
            deleted.append(abs)
        elif dstate == b'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == b'r' and not st:
            removed.append(abs)
        elif dstate == b'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten


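# Descriptive note (added for readability; not upstream): the one-letter
# dirstate states tested above are b'n' (normal), b'a' (added), b'r'
# (removed), b'm' (merged) and b'?' (untracked); `st` is the walk's stat
# result, so "not st" means the file is missing from the working directory.

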
def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(
            repo, added, removed, similarity
        ):
            if (
                repo.ui.verbose
                or not matcher.exact(old)
                or not matcher.exact(new)
            ):
                repo.ui.status(
                    _(
                        b'recording removal of %s as rename to %s '
                        b'(%d%% similar)\n'
                    )
                    % (uipathfn(old), uipathfn(new), score * 100)
                )
            renames[new] = old
    return renames


def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in pycompat.iteritems(renames):
            wctx.copy(old, new)


def getrenamedfn(repo, endrev=None):
    if copiesmod.usechangesetcentricalgo(repo):

        def getrenamed(fn, rev):
            ctx = repo[rev]
            p1copies = ctx.p1copies()
            if fn in p1copies:
                return p1copies[fn]
            p2copies = ctx.p2copies()
            if fn in p2copies:
                return p2copies[fn]
            return None

        return getrenamed

    rcache = {}
    if endrev is None:
        endrev = len(repo)

    def getrenamed(fn, rev):
        '''looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev.'''
        if fn not in rcache:
            rcache[fn] = {}
            fl = repo.file(fn)
            for i in fl:
                lr = fl.linkrev(i)
                renamed = fl.renamed(fl.node(i))
                rcache[fn][lr] = renamed and renamed[0]
                if lr >= endrev:
                    break
        if rev in rcache[fn]:
            return rcache[fn][rev]

        # If linkrev != rev (i.e. rev not found in rcache) fallback to
        # filectx logic.
        try:
            return repo[rev][fn].copysource()
        except error.LookupError:
            return None

    return getrenamed


1428 def getcopiesfn(repo, endrev=None):
1431 def getcopiesfn(repo, endrev=None):
1429 if copiesmod.usechangesetcentricalgo(repo):
1432 if copiesmod.usechangesetcentricalgo(repo):
1430
1433
1431 def copiesfn(ctx):
1434 def copiesfn(ctx):
1432 if ctx.p2copies():
1435 if ctx.p2copies():
1433 allcopies = ctx.p1copies().copy()
1436 allcopies = ctx.p1copies().copy()
1434 # There should be no overlap
1437 # There should be no overlap
1435 allcopies.update(ctx.p2copies())
1438 allcopies.update(ctx.p2copies())
1436 return sorted(allcopies.items())
1439 return sorted(allcopies.items())
1437 else:
1440 else:
1438 return sorted(ctx.p1copies().items())
1441 return sorted(ctx.p1copies().items())
1439
1442
1440 else:
1443 else:
1441 getrenamed = getrenamedfn(repo, endrev)
1444 getrenamed = getrenamedfn(repo, endrev)
1442
1445
1443 def copiesfn(ctx):
1446 def copiesfn(ctx):
1444 copies = []
1447 copies = []
1445 for fn in ctx.files():
1448 for fn in ctx.files():
1446 rename = getrenamed(fn, ctx.rev())
1449 rename = getrenamed(fn, ctx.rev())
1447 if rename:
1450 if rename:
1448 copies.append((fn, rename))
1451 copies.append((fn, rename))
1449 return copies
1452 return copies
1450
1453
1451 return copiesfn
1454 return copiesfn


def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc:  # copying back a copy?
        if repo.dirstate[dst] not in b'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == b'a' and origsrc == src:
            if not ui.quiet:
                ui.warn(
                    _(
                        b"%s has not been committed yet, so no copy "
                        b"data will be stored for %s.\n"
                    )
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
                )
            if repo.dirstate[dst] in b'?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)


def movedirstate(repo, newctx, match=None):
    """Move the dirstate to newctx and adjust it as necessary.

    A matcher can be provided as an optimization. It is probably a bug to pass
    a matcher that doesn't match all the differences between the parent of the
    working copy and newctx.
    """
    oldctx = repo[b'.']
    ds = repo.dirstate
    copies = dict(ds.copies())
    ds.setparents(newctx.node(), nullid)
    s = newctx.status(oldctx, match=match)
    for f in s.modified:
        if ds[f] == b'r':
            # modified + removed -> removed
            continue
        ds.normallookup(f)

    for f in s.added:
        if ds[f] == b'r':
            # added + removed -> unknown
            ds.drop(f)
        elif ds[f] != b'a':
            ds.add(f)

    for f in s.removed:
        if ds[f] == b'a':
            # removed + added -> normal
            ds.normallookup(f)
        elif ds[f] != b'r':
            ds.remove(f)

    # Merge old parent and old working dir copies
    oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
    oldcopies.update(copies)
    copies = {
        dst: oldcopies.get(src, src)
        for dst, src in pycompat.iteritems(oldcopies)
    }
    # Adjust the dirstate copies
    for dst, src in pycompat.iteritems(copies):
        if src not in newctx or dst in newctx or ds[dst] != b'a':
            src = None
        ds.copy(src, dst)
    repo._quick_access_changeid_invalidate()


def filterrequirements(requirements):
    """ filters the requirements into two sets:

    wcreq: requirements which should be written in .hg/requires
    storereq: which should be written in .hg/store/requires

    Returns (wcreq, storereq)
    """
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
        wc, store = set(), set()
        for r in requirements:
            if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
                wc.add(r)
            else:
                store.add(r)
        return wc, store
    return requirements, None
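
# Example (hypothetical requirement names): when the share-safe requirement is
# present the set is split; otherwise everything stays in .hg/requires.
#
#   wcreq, storereq = filterrequirements(repo.requirements)
#   # wcreq    -> written to .hg/requires (working-copy side)
#   # storereq -> written to .hg/store/requires, or None without share-safe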


def istreemanifest(repo):
    """ returns whether the repository is using treemanifest or not """
    return requirementsmod.TREEMANIFEST_REQUIREMENT in repo.requirements


def writereporequirements(repo, requirements=None):
    """ writes requirements for the repo to .hg/requires """
    if requirements:
        repo.requirements = requirements
    wcreq, storereq = filterrequirements(repo.requirements)
    if wcreq is not None:
        writerequires(repo.vfs, wcreq)
    if storereq is not None:
        writerequires(repo.svfs, storereq)


def writerequires(opener, requirements):
    with opener(b'requires', b'w', atomictemp=True) as fp:
        for r in sorted(requirements):
            fp.write(b"%s\n" % r)
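
# The resulting 'requires' file is plain text, one requirement per line,
# written in sorted order; for example (illustrative contents only):
#
#   dotencode
#   fncache
#   generaldelta
#   store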


class filecachesubentry(object):
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

            # check again
            if not self._cacheable:
                return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise


class filecacheentry(object):
    def __init__(self, paths, stat=True):
        self._entries = []
        for path in paths:
            self._entries.append(filecachesubentry(path, stat))

    def changed(self):
        '''true if any entry has changed'''
        for entry in self._entries:
            if entry.changed():
                return True
        return False

    def refresh(self):
        for entry in self._entries:
            entry.refresh()


class filecache(object):
    """A property-like decorator that tracks files under .hg/ for updates.

    On first access, the files defined as arguments are stat()ed and the
    results cached. The decorated function is called. The results are stashed
    away in a ``_filecache`` dict on the object whose method is decorated.

    On subsequent access, the cached result is used as it is set to the
    instance dictionary.

    On external property set/delete operations, the caller must update the
    corresponding _filecache entry appropriately. Use __class__.<attr>.set()
    instead of directly setting <attr>.

    When using the property API, the cached data is always used if available.
    No stat() is performed to check if the file has changed.

    Others can muck about with the state of the ``_filecache`` dict. e.g. they
    can populate an entry before the property's getter is called. In this case,
    entries in ``_filecache`` will be used during property operations,
    if available. If the underlying file changes, it is up to external callers
    to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
    method result as well as possibly calling ``del obj._filecache[attr]`` to
    remove the ``filecacheentry``.
    """

    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        self.func = func
        self.sname = func.__name__
        self.name = pycompat.sysbytes(self.sname)
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self

        assert self.sname not in obj.__dict__

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.sname] = entry.obj
        return entry.obj

    # don't implement __set__(), which would make __dict__ lookup as slow as
    # function call.

    def set(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value  # update cached copy
        obj.__dict__[self.sname] = value  # update copy returned by obj.x
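
# Example of how a subclass is typically wired up (a sketch; compare the
# repofilecache/storecache subclasses in localrepo):
#
#   class repofilecache(filecache):
#       def join(self, obj, fname):
#           return obj.vfs.join(fname)
#
#   class localrepository(object):
#       def __init__(self):
#           self._filecache = {}  # required by the descriptor protocol above
#
#       @repofilecache(b'bookmarks')
#       def _bookmarks(self):
#           # recomputed only when .hg/bookmarks changes on disk
#           return bookmarks.bmstore(self)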


def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config(b"extdata", source)
    if not spec:
        raise error.Abort(_(b"unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith(b"shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(
                procutil.tonativestr(cmd),
                shell=True,
                bufsize=-1,
                close_fds=procutil.closefds,
                stdout=subprocess.PIPE,
                cwd=procutil.tonativestr(repo.root),
            )
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            if b" " in l:
                k, v = l.strip().split(b" ", 1)
            else:
                k, v = l.strip(), b""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass  # we ignore data for nodes that don't exist locally
    finally:
        if proc:
            try:
                proc.communicate()
            except ValueError:
                # This happens if we started iterating src and then
                # get a parse error on a line. It should be safe to ignore.
                pass
        if src:
            src.close()
    if proc and proc.returncode != 0:
        raise error.Abort(
            _(b"extdata command '%s' failed: %s")
            % (cmd, procutil.explainexit(proc.returncode))
        )

    return data
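
# Example (hypothetical source name and data): with a config entry such as
#
#   [extdata]
#   bugzilla = shell:cat .hg/bugzilla.map
#
# where each output line is '<revspec> <freeform value>', e.g.
# '3de7bca2b24e fixed in 1234', the call
#
#   data = extdatasource(repo, b'bugzilla')
#
# yields a {rev: value} dict covering the revisions known locally.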


class progress(object):
    def __init__(self, ui, updatebar, topic, unit=b"", total=None):
        self.ui = ui
        self.pos = 0
        self.topic = topic
        self.unit = unit
        self.total = total
        self.debug = ui.configbool(b'progress', b'debug')
        self._updatebar = updatebar

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.complete()

    def update(self, pos, item=b"", total=None):
        assert pos is not None
        if total:
            self.total = total
        self.pos = pos
        self._updatebar(self.topic, self.pos, item, self.unit, self.total)
        if self.debug:
            self._printdebug(item)

    def increment(self, step=1, item=b"", total=None):
        self.update(self.pos + step, item, total)

    def complete(self):
        self.pos = None
        self.unit = b""
        self.total = None
        self._updatebar(self.topic, self.pos, b"", self.unit, self.total)

    def _printdebug(self, item):
        unit = b''
        if self.unit:
            unit = b' ' + self.unit
        if item:
            item = b' ' + item

        if self.total:
            pct = 100.0 * self.pos / self.total
            self.ui.debug(
                b'%s:%s %d/%d%s (%4.2f%%)\n'
                % (self.topic, item, self.pos, self.total, unit, pct)
            )
        else:
            self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit))
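
# Typical usage is as a context manager, normally obtained through
# ui.makeprogress() rather than constructed directly (sketch; 'files' is
# hypothetical):
#
#   with ui.makeprogress(b'scanning', unit=b'files', total=len(files)) as p:
#       for f in files:
#           p.increment(item=f)
#   # complete() runs on exit and clears the progress bar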


def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    return ui.configbool(b'format', b'generaldelta') or ui.configbool(
        b'format', b'usegeneraldelta'
    )


def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    return ui.configbool(b'format', b'generaldelta')


class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""

    firstlinekey = b'__firstline'

    def __init__(self, vfs, path, keys=None):
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of file should
        be treated as a key-value pair or returned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                e = _(b"empty simplekeyvalue file")
                raise error.CorruptedState(e)
            # we don't want to include '\n' in the __firstline
            d[self.firstlinekey] = lines[0][:-1]
            del lines[0]

        try:
            # the 'if line.strip()' part prevents us from failing on empty
            # lines which only contain '\n' therefore are not skipped
            # by 'if line'
            updatedict = dict(
                line[:-1].split(b'=', 1) for line in lines if line.strip()
            )
            if self.firstlinekey in updatedict:
                e = _(b"%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            d.update(updatedict)
        except ValueError as e:
            raise error.CorruptedState(stringutil.forcebytestr(e))
        return d

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file
        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to file before
        everything else, as it is, not in a key=value form"""
        lines = []
        if firstline is not None:
            lines.append(b'%s\n' % firstline)

        for k, v in data.items():
            if k == self.firstlinekey:
                e = b"key name '%s' is reserved" % self.firstlinekey
                raise error.ProgrammingError(e)
            if not k[0:1].isalpha():
                e = b"keys must start with a letter in a key-value file"
                raise error.ProgrammingError(e)
            if not k.isalnum():
                e = b"invalid key name in a simple key-value file"
                raise error.ProgrammingError(e)
            if b'\n' in v:
                e = b"invalid value in a simple key-value file"
                raise error.ProgrammingError(e)
            lines.append(b"%s=%s\n" % (k, v))
        with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp:
            fp.write(b''.join(lines))
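
# Round-trip example (hypothetical keys; lines follow dict iteration order):
#
#   f = simplekeyvaluefile(repo.vfs, b'example')
#   f.write({b'user': b'alice', b'version': b'1'}, firstline=b'fmt0')
#   # file now contains:  fmt0\nuser=alice\nversion=1\n
#   f.read(firstlinenonkeyval=True)
#   # -> {b'__firstline': b'fmt0', b'user': b'alice', b'version': b'1'}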


_reportobsoletedsource = [
    b'debugobsolete',
    b'pull',
    b'push',
    b'serve',
    b'unbundle',
]

_reportnewcssource = [
    b'pull',
    b'unbundle',
]


def prefetchfiles(repo, revmatches):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally, before the command
    uses them.

    Args:
      revmatches: a list of (revision, match) tuples to indicate the files to
      fetch at each revision. If any of the match elements is None, it matches
      all files.
    """

    def _matcher(m):
        if m:
            assert isinstance(m, matchmod.basematcher)
            # The command itself will complain about files that don't exist, so
            # don't duplicate the message.
            return matchmod.badmatch(m, lambda fn, msg: None)
        else:
            return matchall(repo)

    revbadmatches = [(rev, _matcher(match)) for (rev, match) in revmatches]

    fileprefetchhooks(repo, revbadmatches)


# a list of (repo, revmatches) prefetch functions
fileprefetchhooks = util.hooks()
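
# Example of how an extension might register a prefetch hook (a sketch;
# 'myext' and _prefetch are hypothetical names):
#
#   def _prefetch(repo, revmatches):
#       for rev, match in revmatches:
#           fetchfiles(repo, rev, match)  # extension-specific transfer
#
#   scmutil.fileprefetchhooks.add(b'myext', _prefetch)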

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True


def registersummarycallback(repo, otr, txnname=b'', as_validator=False):
    """register a callback to issue a summary after the transaction is closed

    If as_validator is true, then the callbacks are registered as transaction
    validators instead
    """

    def txmatch(sources):
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used leading to troubles. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())

        def wrapped(tr):
            repo = reporef()
            if filtername:
                assert repo is not None  # help pytype
                repo = repo.filtered(filtername)
            func(repo, tr)

        newcat = b'%02i-txnreport' % len(categories)
        if as_validator:
            otr.addvalidator(newcat, wrapped)
        else:
            otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    @reportsummary
    def reportchangegroup(repo, tr):
        cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
        cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
        cgfiles = tr.changes.get(b'changegroup-count-files', 0)
        cgheads = tr.changes.get(b'changegroup-count-heads', 0)
        if cgchangesets or cgrevisions or cgfiles:
            htext = b""
            if cgheads:
                htext = _(b" (%+d heads)") % cgheads
            msg = _(b"added %d changesets with %d changes to %d files%s\n")
            if as_validator:
                msg = _(b"adding %d changesets with %d changes to %d files%s\n")
            assert repo is not None  # help pytype
            repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))

    if txmatch(_reportobsoletedsource):

        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            newmarkers = len(tr.changes.get(b'obsmarkers', ()))
            if newmarkers:
                repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
            if obsoleted:
                msg = _(b'obsoleted %i changesets\n')
                if as_validator:
                    msg = _(b'obsoleting %i changesets\n')
                repo.ui.status(msg % len(obsoleted))

        if obsolete.isenabled(
            repo, obsolete.createmarkersopt
        ) and repo.ui.configbool(
            b'experimental', b'evolution.report-instabilities'
        ):
            instabilitytypes = [
                (b'orphan', b'orphan'),
                (b'phase-divergent', b'phasedivergent'),
                (b'content-divergent', b'contentdivergent'),
            ]

            def getinstabilitycounts(repo):
                filtered = repo.changelog.filteredrevs
                counts = {}
                for instability, revset in instabilitytypes:
                    counts[instability] = len(
                        set(obsolete.getrevs(repo, revset)) - filtered
                    )
                return counts

            oldinstabilitycounts = getinstabilitycounts(repo)

            @reportsummary
            def reportnewinstabilities(repo, tr):
                newinstabilitycounts = getinstabilitycounts(repo)
                for instability, revset in instabilitytypes:
                    delta = (
                        newinstabilitycounts[instability]
                        - oldinstabilitycounts[instability]
                    )
                    msg = getinstabilitymessage(delta, instability)
                    if msg:
                        repo.ui.warn(msg)

    if txmatch(_reportnewcssource):

        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            unfi = repo.unfiltered()
            if origrepolen >= len(unfi):
                return

            # Compute the bounds of new visible revisions' range.
            revs = smartset.spanset(repo, start=origrepolen)
            if revs:
                minrev, maxrev = repo[revs.min()], repo[revs.max()]

                if minrev == maxrev:
                    revrange = minrev
                else:
                    revrange = b'%s:%s' % (minrev, maxrev)
                draft = len(repo.revs(b'%ld and draft()', revs))
                secret = len(repo.revs(b'%ld and secret()', revs))
                if not (draft or secret):
                    msg = _(b'new changesets %s\n') % revrange
                elif draft and secret:
                    msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
                    msg %= (revrange, draft, secret)
                elif draft:
                    msg = _(b'new changesets %s (%d drafts)\n')
                    msg %= (revrange, draft)
                elif secret:
                    msg = _(b'new changesets %s (%d secrets)\n')
                    msg %= (revrange, secret)
                else:
                    errormsg = b'entered unreachable condition'
                    raise error.ProgrammingError(errormsg)
                repo.ui.status(msg)

            # search new changesets directly pulled as obsolete
            duplicates = tr.changes.get(b'revduplicates', ())
            obsadded = unfi.revs(
                b'(%d: + %ld) and obsolete()', origrepolen, duplicates
            )
            cl = repo.changelog
            extinctadded = [r for r in obsadded if r not in cl]
            if extinctadded:
                # They are not just obsolete, but obsolete and invisible
                # we call them "extinct" internally but the terms have not been
                # exposed to users.
                msg = b'(%d other changesets obsolete on arrival)\n'
                repo.ui.status(msg % len(extinctadded))

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            pull/unbundle.
            """
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            published = []
            for revs, (old, new) in tr.changes.get(b'phases', []):
                if new != phases.public:
                    continue
                published.extend(rev for rev in revs if rev < origrepolen)
            if not published:
                return
            msg = _(b'%d local changesets published\n')
            if as_validator:
                msg = _(b'%d local changesets will be published\n')
            repo.ui.status(msg % len(published))


def getinstabilitymessage(delta, instability):
    """function to return the message to show warning about new instabilities

    exists as a separate function so that extensions can wrap it to show more
    information, like how to fix instabilities"""
    if delta > 0:
        return _(b'%i new %s changesets\n') % (delta, instability)


def nodesummaries(repo, nodes, maxnumnodes=4):
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return b' '.join(short(h) for h in nodes)
    first = b' '.join(short(h) for h in nodes[:maxnumnodes])
    return _(b"%s and %d others") % (first, len(nodes) - maxnumnodes)


def enforcesinglehead(repo, tr, desc, accountclosed=False):
    """check that no named branch has multiple heads"""
    if desc in (b'strip', b'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered(b'visible')
    # possible improvement: we could restrict the check to affected branch
    bm = visible.branchmap()
    for name in bm:
        heads = bm.branchheads(name, closed=accountclosed)
        if len(heads) > 1:
            msg = _(b'rejecting multiple heads on branch "%s"')
            msg %= name
            hint = _(b'%d heads: %s')
            hint %= (len(heads), nodesummaries(repo, heads))
            raise error.Abort(msg, hint=hint)
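
# In core this check backs the experimental.single-head-per-branch option;
# a sketch of invoking it from a pretxnclose validator (names hypothetical):
#
#   def validate(tr):
#       scmutil.enforcesinglehead(repo, tr, desc, accountclosed=False)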


def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.
    """
    return sink


def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    if not repo.filtername or not repo.ui.configbool(
        b'experimental', b'directaccess'
    ):
        return repo

    if repo.filtername not in (b'visible', b'visible-hidden'):
        return repo

    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError:  # will be reported by scmutil.revrange()
            continue

        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)

    if not revs:
        return repo

    if hiddentype == b'warn':
        unfi = repo.unfiltered()
        revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in revs])
        repo.ui.warn(
            _(
                b"warning: accessing hidden changesets for write "
                b"operation: %s\n"
            )
            % revstr
        )

    # we have to use a new filtername to separate branch/tags caches until we
    # can disable these caches when revisions are dynamically pinned.
    return repo.filtered(b'visible-hidden', revs)


def _getrevsfromsymbols(repo, symbols):
    """parses the list of symbols and returns a set of revision numbers of
    hidden changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
    for s in symbols:
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    if n not in cl:
                        revs.add(n)
                    continue
        except ValueError:
            pass

        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs


def bookmarkrevs(repo, mark):
    """
    Select revisions reachable by a given bookmark
    """
    return repo.revs(
        b"ancestors(bookmark(%s)) - "
        b"ancestors(head() and not bookmark(%s)) - "
        b"ancestors(bookmark() and not bookmark(%s))",
        mark,
        mark,
        mark,
    )
@@ -1,455 +1,455 b''
# worker.py - master-slave parallelism support
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import os
import signal
import sys
import threading
import time

try:
    import selectors

    selectors.BaseSelector
except ImportError:
    from .thirdparty import selectors2 as selectors

from .i18n import _
from . import (
    encoding,
    error,
    pycompat,
    scmutil,
    util,
)


def countcpus():
    '''try to count the number of CPUs on the system'''

    # posix
    try:
        n = int(os.sysconf('SC_NPROCESSORS_ONLN'))
        if n > 0:
            return n
    except (AttributeError, ValueError):
        pass

    # windows
    try:
        n = int(encoding.environ[b'NUMBER_OF_PROCESSORS'])
        if n > 0:
            return n
    except (KeyError, ValueError):
        pass

    return 1


def _numworkers(ui):
    s = ui.config(b'worker', b'numcpus')
    if s:
        try:
            n = int(s)
            if n >= 1:
                return n
        except ValueError:
            raise error.Abort(_(b'number of cpus must be an integer'))
    return min(max(countcpus(), 4), 32)
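
# The pool size can be pinned via configuration; otherwise it defaults to
# countcpus() clamped to the range [4, 32]. For example:
#
#   [worker]
#   numcpus = 8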


if pycompat.ispy3:

    class _blockingreader(object):
        def __init__(self, wrapped):
            self._wrapped = wrapped

        # Do NOT implement readinto() by making it delegate to
        # _wrapped.readinto(), since that is unbuffered. The unpickler is fine
        # with just read() and readline(), so we don't need to implement it.

        def readline(self):
            return self._wrapped.readline()

        # issue multiple reads until size is fulfilled
        def read(self, size=-1):
            if size < 0:
                return self._wrapped.readall()

            buf = bytearray(size)
            view = memoryview(buf)
            pos = 0

            while pos < size:
                ret = self._wrapped.readinto(view[pos:])
                if not ret:
                    break
                pos += ret

            del view
            del buf[pos:]
            return bytes(buf)
99
99
100
100
101 else:
101 else:
102
102
103 def _blockingreader(wrapped):
103 def _blockingreader(wrapped):
104 return wrapped
104 return wrapped
105
105
106
106
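Note: the read() loop above matters because a pipe can return fewer bytes than requested per call. A minimal sketch of that behavior, using a hypothetical short-reading wrapper (the class name and the 4-byte cap are invented for illustration; the detecttruncated.py test further down this page forces the same situation by splitting os.write):

import io

class shortreader(io.RawIOBase):
    """Hypothetical reader that returns at most 4 bytes per readinto()."""

    def __init__(self, data):
        self._buf = io.BytesIO(data)

    def readinto(self, b):
        # deliver the data in short reads, like a busy pipe might
        return self._buf.readinto(memoryview(b)[:4])

r = _blockingreader(shortreader(b'0123456789'))
assert r.read(10) == b'0123456789'  # one read() despite three short readinto()s
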
if pycompat.isposix or pycompat.iswindows:
    _STARTUP_COST = 0.01
    # The Windows worker is thread based. If tasks are CPU bound, threads
    # in the presence of the GIL result in excessive context switching and
    # this overhead can slow down execution.
    _DISALLOW_THREAD_UNSAFE = pycompat.iswindows
else:
    _STARTUP_COST = 1e30
    _DISALLOW_THREAD_UNSAFE = False


def worthwhile(ui, costperop, nops, threadsafe=True):
    '''try to determine whether the benefit of multiple processes can
    outweigh the cost of starting them'''

    if not threadsafe and _DISALLOW_THREAD_UNSAFE:
        return False

    linear = costperop * nops
    workers = _numworkers(ui)
    benefit = linear - (_STARTUP_COST * workers + linear / workers)
    return benefit >= 0.15

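To make the heuristic concrete, here is the arithmetic with sample numbers (the per-task cost, task count, and pool size are invented for illustration; 0.01 is the POSIX/Windows _STARTUP_COST defined above):

costperop = 0.001  # assumed cost of one task
nops = 10000       # assumed number of tasks
workers = 4        # assumed pool size

linear = costperop * nops                     # 10.0: estimated serial cost
parallel = 0.01 * workers + linear / workers  # 0.04 + 2.5 = 2.54
benefit = linear - parallel                   # 7.46
print(benefit >= 0.15)                        # True: parallelism is worthwhile
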
def worker(
    ui, costperarg, func, staticargs, args, hasretval=False, threadsafe=True
):
    '''run a function, possibly in parallel in multiple worker
    processes.

    returns a progress iterator

    costperarg - cost of a single task

    func - function to run. It is expected to return a progress iterator.

    staticargs - arguments to pass to every invocation of the function

    args - arguments to split into chunks, to pass to individual
    workers

    hasretval - when True, func and the current function return a progress
    iterator then a dict (encoded as an iterator that yields many (False, ..)
    then a (True, dict)). The dicts are joined in some arbitrary order, so
    overlapping keys are a bad idea.

    threadsafe - whether work items are thread safe and can be executed using
    a thread-based worker. Should be disabled for CPU heavy tasks that don't
    release the GIL.
    '''
    enabled = ui.configbool(b'worker', b'enabled')
    if enabled and worthwhile(ui, costperarg, len(args), threadsafe=threadsafe):
        return _platformworker(ui, func, staticargs, args, hasretval)
    return func(*staticargs + (args,))

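A minimal sketch of calling worker() from an extension; it mirrors the t.py test extension quoted later on this page, so a real ui object is assumed to be in scope and the cost value is chosen only to force the parallel path:

from mercurial import worker

def runme(ui, args):
    # func receives staticargs plus one chunk of args and yields progress
    for arg in args:
        ui.status(b'run\n')
        yield 1, arg

# a large costperarg makes worthwhile() pick the parallel path
runs = worker.worker(ui, 100000.0, runme, (ui,), range(8))
for progress, item in runs:
    pass  # draining the iterator is what actually drives the workers
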
def _posixworker(ui, func, staticargs, args, hasretval):
    workers = _numworkers(ui)
    oldhandler = signal.getsignal(signal.SIGINT)
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    pids, problem = set(), [0]

    def killworkers():
        # unregister SIGCHLD handler as all children will be killed. This
        # function shouldn't be interrupted by another SIGCHLD; otherwise pids
        # could be updated while iterating, which would cause inconsistency.
        signal.signal(signal.SIGCHLD, oldchldhandler)
        # if one worker bails, there's no good reason to wait for the rest
        for p in pids:
            try:
                os.kill(p, signal.SIGTERM)
            except OSError as err:
                if err.errno != errno.ESRCH:
                    raise

    def waitforworkers(blocking=True):
        for pid in pids.copy():
            p = st = 0
            while True:
                try:
                    p, st = os.waitpid(pid, (0 if blocking else os.WNOHANG))
                    break
                except OSError as e:
                    if e.errno == errno.EINTR:
                        continue
                    elif e.errno == errno.ECHILD:
                        # the child may already have been reaped, but pids not
                        # yet updated (maybe interrupted just after waitpid)
                        pids.discard(pid)
                        break
                    else:
                        raise
            if not p:
                # skip subsequent steps, because the child process should
                # still be running in this case
                continue
            pids.discard(p)
            st = _exitstatus(st)
            if st and not problem[0]:
                problem[0] = st

    def sigchldhandler(signum, frame):
        waitforworkers(blocking=False)
        if problem[0]:
            killworkers()

    oldchldhandler = signal.signal(signal.SIGCHLD, sigchldhandler)
    ui.flush()
    parentpid = os.getpid()
    pipes = []
    retval = {}
    for pargs in partition(args, min(workers, len(args))):
        # Every worker gets its own pipe to send results on, so we don't have to
        # implement atomic writes larger than PIPE_BUF. Each forked process has
        # its own pipe's descriptors in the local variables, and the parent
        # process has the full list of pipe descriptors (and it doesn't really
        # care what order they're in).
        rfd, wfd = os.pipe()
        pipes.append((rfd, wfd))
        # make sure we use os._exit in all worker code paths. otherwise the
        # worker may do some clean-ups which could cause surprises like
        # deadlock. see sshpeer.cleanup for example.
        # override error handling *before* fork. this is necessary because
        # exception (signal) may arrive after fork, before "pid =" assignment
        # completes, and other exception handler (dispatch.py) can lead to
        # unexpected code path without os._exit.
        ret = -1
        try:
            pid = os.fork()
            if pid == 0:
                signal.signal(signal.SIGINT, oldhandler)
                signal.signal(signal.SIGCHLD, oldchldhandler)

                def workerfunc():
                    for r, w in pipes[:-1]:
                        os.close(r)
                        os.close(w)
                    os.close(rfd)
                    for result in func(*(staticargs + (pargs,))):
                        os.write(wfd, util.pickle.dumps(result))
                    return 0

                ret = scmutil.callcatch(ui, workerfunc)
        except:  # parent re-raises, child never returns
            if os.getpid() == parentpid:
                raise
            exctype = sys.exc_info()[0]
            force = not issubclass(exctype, KeyboardInterrupt)
            ui.traceback(force=force)
        finally:
            if os.getpid() != parentpid:
                try:
                    ui.flush()
                except:  # never returns, no re-raises
                    pass
                finally:
                    os._exit(ret & 255)
        pids.add(pid)
    selector = selectors.DefaultSelector()
    for rfd, wfd in pipes:
        os.close(wfd)
        selector.register(os.fdopen(rfd, 'rb', 0), selectors.EVENT_READ)

    def cleanup():
        signal.signal(signal.SIGINT, oldhandler)
        waitforworkers()
        signal.signal(signal.SIGCHLD, oldchldhandler)
        selector.close()
        return problem[0]

    try:
        openpipes = len(pipes)
        while openpipes > 0:
            for key, events in selector.select():
                try:
                    res = util.pickle.load(_blockingreader(key.fileobj))
                    if hasretval and res[0]:
                        retval.update(res[1])
                    else:
                        yield res
                except EOFError:
                    selector.unregister(key.fileobj)
                    key.fileobj.close()
                    openpipes -= 1
                except IOError as e:
                    if e.errno == errno.EINTR:
                        continue
                    raise
    except:  # re-raises
        killworkers()
        cleanup()
        raise
    status = cleanup()
    if status:
        if status < 0:
            os.kill(os.getpid(), -status)
-        sys.exit(status)
+        raise error.WorkerError(status)
    if hasretval:
        yield True, retval

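The one behavioral change in this hunk sits at the end of _posixworker(): when a child fails, the parent used to call sys.exit(status) from inside the progress iterator, bypassing normal error handling. Raising error.WorkerError(status) instead lets callers handle the failure like any other Mercurial error. A hedged sketch of a hypothetical caller (assuming ui, costperarg, func, staticargs, and args are set up as in the usage sketch above; this changeset only guarantees that WorkerError is constructed with the child's status):

from mercurial import error, worker

try:
    for res in worker.worker(ui, costperarg, func, staticargs, args):
        pass
except error.WorkerError:
    # a child exited non-zero or was killed by a signal; this now surfaces
    # as an ordinary exception instead of a SystemExit from the generator
    ui.warn(b'worker process failed\n')
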
def _posixexitstatus(code):
    '''convert a posix exit status into the same form returned by
    os.spawnv

    returns None if the process was stopped instead of exiting'''
    if os.WIFEXITED(code):
        return os.WEXITSTATUS(code)
    elif os.WIFSIGNALED(code):
        return -(os.WTERMSIG(code))

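To see both branches in action (POSIX only; the child bodies here are invented for illustration):

import os
import signal

pid = os.fork()
if pid == 0:
    os._exit(3)                  # child exits normally with code 3
wpid, st = os.waitpid(pid, 0)
print(_posixexitstatus(st))      # 3

pid = os.fork()
if pid == 0:
    signal.pause()               # child sleeps until killed
os.kill(pid, signal.SIGTERM)
wpid, st = os.waitpid(pid, 0)
print(_posixexitstatus(st))      # -15 on Linux, i.e. -signal.SIGTERM
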
def _windowsworker(ui, func, staticargs, args, hasretval):
    class Worker(threading.Thread):
        def __init__(
            self, taskqueue, resultqueue, func, staticargs, *args, **kwargs
        ):
            threading.Thread.__init__(self, *args, **kwargs)
            self._taskqueue = taskqueue
            self._resultqueue = resultqueue
            self._func = func
            self._staticargs = staticargs
            self._interrupted = False
            self.daemon = True
            self.exception = None

        def interrupt(self):
            self._interrupted = True

        def run(self):
            try:
                while not self._taskqueue.empty():
                    try:
                        args = self._taskqueue.get_nowait()
                        for res in self._func(*self._staticargs + (args,)):
                            self._resultqueue.put(res)
                            # threading doesn't provide a native way to
                            # interrupt execution. handle it manually at every
                            # iteration.
                            if self._interrupted:
                                return
                    except pycompat.queue.Empty:
                        break
            except Exception as e:
                # store the exception such that the main thread can resurface
                # it as if the func was running without workers.
                self.exception = e
                raise

    threads = []

    def trykillworkers():
        # Allow up to 1 second to clean worker threads nicely
        cleanupend = time.time() + 1
        for t in threads:
            t.interrupt()
        for t in threads:
            remainingtime = cleanupend - time.time()
            t.join(remainingtime)
            if t.is_alive():
                # pass over the workers joining failure. it is more
                # important to surface the initial exception than the
                # fact that one of the workers may be processing a large
                # task and does not get to handle the interruption.
                ui.warn(
                    _(
                        b"failed to kill worker threads while "
                        b"handling an exception\n"
                    )
                )
                return

    workers = _numworkers(ui)
    resultqueue = pycompat.queue.Queue()
    taskqueue = pycompat.queue.Queue()
    retval = {}
    # partition work to more pieces than workers to minimize the chance
    # of uneven distribution of large tasks between the workers
    for pargs in partition(args, workers * 20):
        taskqueue.put(pargs)
    for _i in range(workers):
        t = Worker(taskqueue, resultqueue, func, staticargs)
        threads.append(t)
        t.start()
    try:
        while len(threads) > 0:
            while not resultqueue.empty():
                res = resultqueue.get()
                if hasretval and res[0]:
                    retval.update(res[1])
                else:
                    yield res
            threads[0].join(0.05)
            finishedthreads = [_t for _t in threads if not _t.is_alive()]
            for t in finishedthreads:
                if t.exception is not None:
                    raise t.exception
                threads.remove(t)
    except (Exception, KeyboardInterrupt):  # re-raises
        trykillworkers()
        raise
    while not resultqueue.empty():
        res = resultqueue.get()
        if hasretval and res[0]:
            retval.update(res[1])
        else:
            yield res
    if hasretval:
        yield True, retval


if pycompat.iswindows:
    _platformworker = _windowsworker
else:
    _platformworker = _posixworker
    _exitstatus = _posixexitstatus

def partition(lst, nslices):
    '''partition a list into N slices of roughly equal size

    The current strategy takes every Nth element from the input. If
    we ever write workers that need to preserve grouping in input
    we should consider allowing callers to specify a partition strategy.

    mpm is not a fan of this partitioning strategy when files are involved.
    In his words:

        Single-threaded Mercurial makes a point of creating and visiting
        files in a fixed order (alphabetical). When creating files in order,
        a typical filesystem is likely to allocate them on nearby regions on
        disk. Thus, when revisiting in the same order, locality is maximized
        and various forms of OS and disk-level caching and read-ahead get a
        chance to work.

        This effect can be quite significant on spinning disks. I discovered it
        circa Mercurial v0.4 when revlogs were named by hashes of filenames.
        Tarring a repo and copying it to another disk effectively randomized
        the revlog ordering on disk by sorting the revlogs by hash and suddenly
        performance of my kernel checkout benchmark dropped by ~10x because the
        "working set" of sectors visited no longer fit in the drive's cache and
        the workload switched from streaming to random I/O.

    What we should really be doing is have workers read filenames from an
    ordered queue. This preserves locality and also keeps any worker from
    getting more than one file out of balance.
    '''
    for i in range(nslices):
        yield lst[i::nslices]
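Because of the stride, the slices interleave elements rather than taking contiguous chunks:

list(partition(list(range(10)), 3))
# -> [[0, 3, 6, 9], [1, 4, 7], [2, 5, 8]]
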
@@ -1,165 +1,166 b''
Test UI worker interaction

  $ cat > t.py <<EOF
  > from __future__ import absolute_import, print_function
  > import sys
  > import time
  > from mercurial import (
  >     error,
  >     registrar,
  >     ui as uimod,
  >     worker,
  > )
  > sys.unraisablehook = lambda x: None
  > def abort(ui, args):
  >     if args[0] == 0:
  >         # by first worker for test stability
  >         raise error.Abort(b'known exception')
  >     return runme(ui, [])
  > def exc(ui, args):
  >     if args[0] == 0:
  >         # by first worker for test stability
  >         raise Exception('unknown exception')
  >     return runme(ui, [])
  > def runme(ui, args):
  >     for arg in args:
  >         ui.status(b'run\n')
  >         yield 1, arg
  >     time.sleep(0.1) # easier to trigger killworkers code path
  > functable = {
  >     b'abort': abort,
  >     b'exc': exc,
  >     b'runme': runme,
  > }
  > cmdtable = {}
  > command = registrar.command(cmdtable)
  > @command(b'test', [], b'hg test [COST] [FUNC]')
  > def t(ui, repo, cost=1.0, func=b'runme'):
  >     cost = float(cost)
  >     func = functable[func]
  >     ui.status(b'start\n')
  >     runs = worker.worker(ui, cost, func, (ui,), range(8))
  >     for n, i in runs:
  >         pass
  >     ui.status(b'done\n')
  > EOF
  $ abspath=`pwd`/t.py
  $ hg init

Run tests with the worker enabled by forcing a high cost

  $ hg --config "extensions.t=$abspath" test 100000.0
  start
  run
  run
  run
  run
  run
  run
  run
  run
  done

Run tests without worker by forcing a low cost

  $ hg --config "extensions.t=$abspath" test 0.0000001
  start
  run
  run
  run
  run
  run
  run
  run
  run
  done

#if no-windows

Known exception should be caught, but printed if --traceback is enabled

  $ hg --config "extensions.t=$abspath" --config 'worker.numcpus=8' \
  > test 100000.0 abort 2>&1
  start
  abort: known exception
  [255]

  $ hg --config "extensions.t=$abspath" --config 'worker.numcpus=8' \
- > test 100000.0 abort --traceback 2>&1 | egrep '(SystemExit|Abort)'
+ > test 100000.0 abort --traceback 2>&1 | egrep '(WorkerError|Abort)'
  raise error.Abort(b'known exception')
  mercurial.error.Abort: known exception (py3 !)
  Abort: known exception (no-py3 !)
- SystemExit: 255
+ raise error.WorkerError(status)
+ mercurial.error.WorkerError: 255

Traceback must be printed for unknown exceptions

  $ hg --config "extensions.t=$abspath" --config 'worker.numcpus=8' \
  > test 100000.0 exc 2>&1 | grep '^Exception'
  Exception: unknown exception

Workers should not do cleanups in all cases

  $ cat > $TESTTMP/detectcleanup.py <<EOF
  > from __future__ import absolute_import
  > import atexit
  > import os
  > import sys
  > import time
  > sys.unraisablehook = lambda x: None
  > oldfork = os.fork
  > count = 0
  > parentpid = os.getpid()
  > def delayedfork():
  >     global count
  >     count += 1
  >     pid = oldfork()
  >     # make it easier to test SIGTERM hitting other workers when they have
  >     # not set up error handling yet.
  >     if count > 1 and pid == 0:
  >         time.sleep(0.1)
  >     return pid
  > os.fork = delayedfork
  > def cleanup():
  >     if os.getpid() != parentpid:
  >         os.write(1, 'should never happen\n')
  > atexit.register(cleanup)
  > EOF

  $ hg --config "extensions.t=$abspath" --config worker.numcpus=8 --config \
  > "extensions.d=$TESTTMP/detectcleanup.py" test 100000 abort
  start
  abort: known exception
  [255]

Do not crash on partially read result

  $ cat > $TESTTMP/detecttruncated.py <<EOF
  > from __future__ import absolute_import
  > import os
  > import sys
  > import time
  > sys.unraisablehook = lambda x: None
  > oldwrite = os.write
  > def splitwrite(fd, string):
  >     ret = oldwrite(fd, string[:9])
  >     if ret == 9:
  >         time.sleep(0.1)
  >         ret += oldwrite(fd, string[9:])
  >     return ret
  > os.write = splitwrite
  > EOF

  $ hg --config "extensions.t=$abspath" --config worker.numcpus=8 --config \
  > "extensions.d=$TESTTMP/detecttruncated.py" test 100000.0
  start
  run
  run
  run
  run
  run
  run
  run
  run
  done

#endif