worker: raise exception instead of calling sys.exit() with child's code...
Martin von Zweigbergk
r46429:8f07f5a9 default
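The diff has two parts: mercurial/error.py gains a WorkerError class carrying a child process's exit status, and scmutil.callcatch() learns to translate that exception back into an exit code. As a minimal hedged sketch of the worker-side change the commit message describes (the actual raise site is in worker.py, which this excerpt does not show):

import os

from mercurial import error

def _reap(status):
    # Instead of sys.exit(os.WEXITSTATUS(status)), surface the child's
    # exit status as an exception the top-level handler can translate.
    if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
        raise error.WorkerError(os.WEXITSTATUS(status))

Raising instead of exiting gives intermediate layers a chance to clean up normally before the process finally exits with the child's code.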
diff --git a/mercurial/error.py b/mercurial/error.py
@@ -1,491 +1,498 @@
1 1 # error.py - Mercurial exceptions
2 2 #
3 3 # Copyright 2005-2008 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Mercurial exceptions.
9 9
10 10 This allows us to catch exceptions at higher levels without forcing
11 11 imports.
12 12 """
13 13
14 14 from __future__ import absolute_import
15 15
16 16 # Do not import anything but pycompat here, please
17 17 from . import pycompat
18 18
19 19
20 20 def _tobytes(exc):
21 21 """Byte-stringify exception in the same way as BaseException_str()"""
22 22 if not exc.args:
23 23 return b''
24 24 if len(exc.args) == 1:
25 25 return pycompat.bytestr(exc.args[0])
26 26 return b'(%s)' % b', '.join(b"'%s'" % pycompat.bytestr(a) for a in exc.args)
27 27
28 28
29 29 class Hint(object):
30 30 """Mix-in to provide a hint of an error
31 31
32 32 This should come first in the inheritance list to consume a hint and
33 33 pass remaining arguments to the exception class.
34 34 """
35 35
36 36 def __init__(self, *args, **kw):
37 37 self.hint = kw.pop('hint', None)
38 38 super(Hint, self).__init__(*args, **kw)
39 39
40 40
41 41 class StorageError(Hint, Exception):
42 42 """Raised when an error occurs in a storage layer.
43 43
44 44 Usually subclassed by a storage-specific exception.
45 45 """
46 46
47 47 __bytes__ = _tobytes
48 48
49 49
50 50 class RevlogError(StorageError):
51 51 pass
52 52
53 53
54 54 class SidedataHashError(RevlogError):
55 55 def __init__(self, key, expected, got):
56 56 self.sidedatakey = key
57 57 self.expecteddigest = expected
58 58 self.actualdigest = got
59 59
60 60
61 61 class FilteredIndexError(IndexError):
62 62 __bytes__ = _tobytes
63 63
64 64
65 65 class LookupError(RevlogError, KeyError):
66 66 def __init__(self, name, index, message):
67 67 self.name = name
68 68 self.index = index
69 69 # this can't be called 'message' because at least some installs of
70 70 # Python 2.6+ complain about the 'message' property being deprecated
71 71 self.lookupmessage = message
72 72 if isinstance(name, bytes) and len(name) == 20:
73 73 from .node import short
74 74
75 75 name = short(name)
76 76 # if name is a binary node, it can be None
77 77 RevlogError.__init__(
78 78 self, b'%s@%s: %s' % (index, pycompat.bytestr(name), message)
79 79 )
80 80
81 81 def __bytes__(self):
82 82 return RevlogError.__bytes__(self)
83 83
84 84 def __str__(self):
85 85 return RevlogError.__str__(self)
86 86
87 87
88 88 class AmbiguousPrefixLookupError(LookupError):
89 89 pass
90 90
91 91
92 92 class FilteredLookupError(LookupError):
93 93 pass
94 94
95 95
96 96 class ManifestLookupError(LookupError):
97 97 pass
98 98
99 99
100 100 class CommandError(Exception):
101 101 """Exception raised on errors in parsing the command line."""
102 102
103 103 def __init__(self, command, message):
104 104 self.command = command
105 105 self.message = message
106 106 super(CommandError, self).__init__()
107 107
108 108 __bytes__ = _tobytes
109 109
110 110
111 111 class UnknownCommand(Exception):
112 112 """Exception raised if command is not in the command table."""
113 113
114 114 def __init__(self, command, all_commands=None):
115 115 self.command = command
116 116 self.all_commands = all_commands
117 117 super(UnknownCommand, self).__init__()
118 118
119 119 __bytes__ = _tobytes
120 120
121 121
122 122 class AmbiguousCommand(Exception):
123 123 """Exception raised if command shortcut matches more than one command."""
124 124
125 125 def __init__(self, prefix, matches):
126 126 self.prefix = prefix
127 127 self.matches = matches
128 128 super(AmbiguousCommand, self).__init__()
129 129
130 130 __bytes__ = _tobytes
131 131
132 132
133 class WorkerError(Exception):
134 """Exception raised when a worker process dies."""
135
136 def __init__(self, status_code):
137 self.status_code = status_code
138
139
133 140 class InterventionRequired(Hint, Exception):
134 141 """Exception raised when a command requires human intervention."""
135 142
136 143 __bytes__ = _tobytes
137 144
138 145
139 146 class ConflictResolutionRequired(InterventionRequired):
140 147 """Exception raised when a continuable command required merge conflict resolution."""
141 148
142 149 def __init__(self, opname):
143 150 from .i18n import _
144 151
145 152 self.opname = opname
146 153 InterventionRequired.__init__(
147 154 self,
148 155 _(
149 156 b"unresolved conflicts (see 'hg resolve', then 'hg %s --continue')"
150 157 )
151 158 % opname,
152 159 )
153 160
154 161
155 162 class Abort(Hint, Exception):
156 163 """Raised if a command needs to print an error and exit."""
157 164
158 165 def __init__(self, message, hint=None):
159 166 self.message = message
160 167 self.hint = hint
161 168 # Pass the message into the Exception constructor to help extensions
162 169 # that look for exc.args[0].
163 170 Exception.__init__(self, message)
164 171
165 172 def __bytes__(self):
166 173 return self.message
167 174
168 175 if pycompat.ispy3:
169 176
170 177 def __str__(self):
171 178 # the output would be unreadable if the message was translated,
172 179 # but do not replace it with encoding.strfromlocal(), which
173 180 # may raise another exception.
174 181 return pycompat.sysstr(self.__bytes__())
175 182
176 183
177 184 class HookLoadError(Abort):
178 185 """raised when loading a hook fails, aborting an operation
179 186
180 187 Exists to allow more specialized catching."""
181 188
182 189
183 190 class HookAbort(Abort):
184 191 """raised when a validation hook fails, aborting an operation
185 192
186 193 Exists to allow more specialized catching."""
187 194
188 195
189 196 class ConfigError(Abort):
190 197 """Exception raised when parsing config files"""
191 198
192 199
193 200 class UpdateAbort(Abort):
194 201 """Raised when an update is aborted for destination issue"""
195 202
196 203
197 204 class MergeDestAbort(Abort):
198 205 """Raised when an update is aborted for destination issues"""
199 206
200 207
201 208 class NoMergeDestAbort(MergeDestAbort):
202 209 """Raised when an update is aborted because there is nothing to merge"""
203 210
204 211
205 212 class ManyMergeDestAbort(MergeDestAbort):
206 213 """Raised when an update is aborted because destination is ambiguous"""
207 214
208 215
209 216 class ResponseExpected(Abort):
210 217 """Raised when an EOF is received for a prompt"""
211 218
212 219 def __init__(self):
213 220 from .i18n import _
214 221
215 222 Abort.__init__(self, _(b'response expected'))
216 223
217 224
218 225 class OutOfBandError(Hint, Exception):
219 226 """Exception raised when a remote repo reports failure"""
220 227
221 228 __bytes__ = _tobytes
222 229
223 230
224 231 class ParseError(Hint, Exception):
225 232 """Raised when parsing config files and {rev,file}sets (msg[, pos])"""
226 233
227 234 def __init__(self, message, location=None, hint=None):
228 235 self.message = message
229 236 self.location = location
230 237 self.hint = hint
231 238 # Pass the message and possibly location into the Exception constructor
232 239 # to help code that looks for exc.args.
233 240 if location is not None:
234 241 Exception.__init__(self, message, location)
235 242 else:
236 243 Exception.__init__(self, message)
237 244
238 245 __bytes__ = _tobytes
239 246
240 247
241 248 class PatchError(Exception):
242 249 __bytes__ = _tobytes
243 250
244 251
245 252 class UnknownIdentifier(ParseError):
246 253 """Exception raised when a {rev,file}set references an unknown identifier"""
247 254
248 255 def __init__(self, function, symbols):
249 256 from .i18n import _
250 257
251 258 ParseError.__init__(self, _(b"unknown identifier: %s") % function)
252 259 self.function = function
253 260 self.symbols = symbols
254 261
255 262
256 263 class RepoError(Hint, Exception):
257 264 __bytes__ = _tobytes
258 265
259 266
260 267 class RepoLookupError(RepoError):
261 268 pass
262 269
263 270
264 271 class FilteredRepoLookupError(RepoLookupError):
265 272 pass
266 273
267 274
268 275 class CapabilityError(RepoError):
269 276 pass
270 277
271 278
272 279 class RequirementError(RepoError):
273 280 """Exception raised if .hg/requires has an unknown entry."""
274 281
275 282
276 283 class StdioError(IOError):
277 284 """Raised if I/O to stdout or stderr fails"""
278 285
279 286 def __init__(self, err):
280 287 IOError.__init__(self, err.errno, err.strerror)
281 288
282 289 # no __bytes__() because error message is derived from the standard IOError
283 290
284 291
285 292 class UnsupportedMergeRecords(Abort):
286 293 def __init__(self, recordtypes):
287 294 from .i18n import _
288 295
289 296 self.recordtypes = sorted(recordtypes)
290 297 s = b' '.join(self.recordtypes)
291 298 Abort.__init__(
292 299 self,
293 300 _(b'unsupported merge state records: %s') % s,
294 301 hint=_(
295 302 b'see https://mercurial-scm.org/wiki/MergeStateRecords for '
296 303 b'more information'
297 304 ),
298 305 )
299 306
300 307
301 308 class UnknownVersion(Abort):
302 309 """generic exception for aborting from an encounter with an unknown version
303 310 """
304 311
305 312 def __init__(self, msg, hint=None, version=None):
306 313 self.version = version
307 314 super(UnknownVersion, self).__init__(msg, hint=hint)
308 315
309 316
310 317 class LockError(IOError):
311 318 def __init__(self, errno, strerror, filename, desc):
312 319 IOError.__init__(self, errno, strerror, filename)
313 320 self.desc = desc
314 321
315 322 # no __bytes__() because error message is derived from the standard IOError
316 323
317 324
318 325 class LockHeld(LockError):
319 326 def __init__(self, errno, filename, desc, locker):
320 327 LockError.__init__(self, errno, b'Lock held', filename, desc)
321 328 self.locker = locker
322 329
323 330
324 331 class LockUnavailable(LockError):
325 332 pass
326 333
327 334
328 335 # LockError is for errors while acquiring the lock -- this is unrelated
329 336 class LockInheritanceContractViolation(RuntimeError):
330 337 __bytes__ = _tobytes
331 338
332 339
333 340 class ResponseError(Exception):
334 341 """Raised to print an error with part of output and exit."""
335 342
336 343 __bytes__ = _tobytes
337 344
338 345
339 346 # derived from KeyboardInterrupt to simplify some breakout code
340 347 class SignalInterrupt(KeyboardInterrupt):
341 348 """Exception raised on SIGTERM and SIGHUP."""
342 349
343 350
344 351 class SignatureError(Exception):
345 352 __bytes__ = _tobytes
346 353
347 354
348 355 class PushRaced(RuntimeError):
349 356 """An exception raised during unbundling that indicates a push race"""
350 357
351 358 __bytes__ = _tobytes
352 359
353 360
354 361 class ProgrammingError(Hint, RuntimeError):
355 362 """Raised if a mercurial (core or extension) developer made a mistake"""
356 363
357 364 def __init__(self, msg, *args, **kwargs):
358 365 # On Python 3, turn the message back into a string since this is
359 366 # an internal-only error that won't be printed except in a
360 367 # stack traces.
361 368 msg = pycompat.sysstr(msg)
362 369 super(ProgrammingError, self).__init__(msg, *args, **kwargs)
363 370
364 371 __bytes__ = _tobytes
365 372
366 373
367 374 class WdirUnsupported(Exception):
368 375 """An exception which is raised when 'wdir()' is not supported"""
369 376
370 377 __bytes__ = _tobytes
371 378
372 379
373 380 # bundle2 related errors
374 381 class BundleValueError(ValueError):
375 382 """error raised when bundle2 cannot be processed"""
376 383
377 384 __bytes__ = _tobytes
378 385
379 386
380 387 class BundleUnknownFeatureError(BundleValueError):
381 388 def __init__(self, parttype=None, params=(), values=()):
382 389 self.parttype = parttype
383 390 self.params = params
384 391 self.values = values
385 392 if self.parttype is None:
386 393 msg = b'Stream Parameter'
387 394 else:
388 395 msg = parttype
389 396 entries = self.params
390 397 if self.params and self.values:
391 398 assert len(self.params) == len(self.values)
392 399 entries = []
393 400 for idx, par in enumerate(self.params):
394 401 val = self.values[idx]
395 402 if val is None:
396 403 entries.append(val)
397 404 else:
398 405 entries.append(b"%s=%r" % (par, pycompat.maybebytestr(val)))
399 406 if entries:
400 407 msg = b'%s - %s' % (msg, b', '.join(entries))
401 408 ValueError.__init__(self, msg)
402 409
403 410
404 411 class ReadOnlyPartError(RuntimeError):
405 412 """error raised when code tries to alter a part being generated"""
406 413
407 414 __bytes__ = _tobytes
408 415
409 416
410 417 class PushkeyFailed(Abort):
411 418 """error raised when a pushkey part failed to update a value"""
412 419
413 420 def __init__(
414 421 self, partid, namespace=None, key=None, new=None, old=None, ret=None
415 422 ):
416 423 self.partid = partid
417 424 self.namespace = namespace
418 425 self.key = key
419 426 self.new = new
420 427 self.old = old
421 428 self.ret = ret
422 429 # no i18n expected to be processed into a better message
423 430 Abort.__init__(
424 431 self, b'failed to update value for "%s/%s"' % (namespace, key)
425 432 )
426 433
427 434
428 435 class CensoredNodeError(StorageError):
429 436 """error raised when content verification fails on a censored node
430 437
431 438 Also contains the tombstone data substituted for the uncensored data.
432 439 """
433 440
434 441 def __init__(self, filename, node, tombstone):
435 442 from .node import short
436 443
437 444 StorageError.__init__(self, b'%s:%s' % (filename, short(node)))
438 445 self.tombstone = tombstone
439 446
440 447
441 448 class CensoredBaseError(StorageError):
442 449 """error raised when a delta is rejected because its base is censored
443 450
444 451 A delta based on a censored revision must be formed as a single patch
445 452 operation which replaces the entire base with new content. This ensures
446 453 the delta may be applied by clones which have not censored the base.
447 454 """
448 455
449 456
450 457 class InvalidBundleSpecification(Exception):
451 458 """error raised when a bundle specification is invalid.
452 459
453 460 This is used for syntax errors as opposed to support errors.
454 461 """
455 462
456 463 __bytes__ = _tobytes
457 464
458 465
459 466 class UnsupportedBundleSpecification(Exception):
460 467 """error raised when a bundle specification is not supported."""
461 468
462 469 __bytes__ = _tobytes
463 470
464 471
465 472 class CorruptedState(Exception):
466 473 """error raised when a command is not able to read its state from file"""
467 474
468 475 __bytes__ = _tobytes
469 476
470 477
471 478 class PeerTransportError(Abort):
472 479 """Transport-level I/O error when communicating with a peer repo."""
473 480
474 481
475 482 class InMemoryMergeConflictsError(Exception):
476 483 """Exception raised when merge conflicts arose during an in-memory merge."""
477 484
478 485 __bytes__ = _tobytes
479 486
480 487
481 488 class WireprotoCommandError(Exception):
482 489 """Represents an error during execution of a wire protocol command.
483 490
484 491 Should only be thrown by wire protocol version 2 commands.
485 492
486 493 The error is a formatter string and an optional iterable of arguments.
487 494 """
488 495
489 496 def __init__(self, message, args=None):
490 497 self.message = message
491 498 self.messageargs = args
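Every class above that sets __bytes__ = _tobytes stringifies the way BaseException.__str__() does, only into bytes. A worked illustration (DemoError is a hypothetical stand-in, not part of the module):

from mercurial import error

class DemoError(Exception):
    __bytes__ = error._tobytes  # the same trick the classes above use

assert bytes(DemoError()) == b''
assert bytes(DemoError(b'boom')) == b'boom'
assert bytes(DemoError(b'foo', b'bar')) == b"('foo', 'bar')"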
diff --git a/mercurial/scmutil.py b/mercurial/scmutil.py
@@ -1,2287 +1,2290 @@
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import os
13 13 import posixpath
14 14 import re
15 15 import subprocess
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 bin,
21 21 hex,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 wdirid,
26 26 wdirrev,
27 27 )
28 28 from .pycompat import getattr
29 29 from .thirdparty import attr
30 30 from . import (
31 31 copies as copiesmod,
32 32 encoding,
33 33 error,
34 34 match as matchmod,
35 35 obsolete,
36 36 obsutil,
37 37 pathutil,
38 38 phases,
39 39 policy,
40 40 pycompat,
41 41 requirements as requirementsmod,
42 42 revsetlang,
43 43 similar,
44 44 smartset,
45 45 url,
46 46 util,
47 47 vfs,
48 48 )
49 49
50 50 from .utils import (
51 51 hashutil,
52 52 procutil,
53 53 stringutil,
54 54 )
55 55
56 56 if pycompat.iswindows:
57 57 from . import scmwindows as scmplatform
58 58 else:
59 59 from . import scmposix as scmplatform
60 60
61 61 parsers = policy.importmod('parsers')
62 62 rustrevlog = policy.importrust('revlog')
63 63
64 64 termsize = scmplatform.termsize
65 65
66 66
67 67 @attr.s(slots=True, repr=False)
68 68 class status(object):
69 69 '''Struct with a list of files per status.
70 70
71 71 The 'deleted', 'unknown' and 'ignored' properties are only
72 72 relevant to the working copy.
73 73 '''
74 74
75 75 modified = attr.ib(default=attr.Factory(list))
76 76 added = attr.ib(default=attr.Factory(list))
77 77 removed = attr.ib(default=attr.Factory(list))
78 78 deleted = attr.ib(default=attr.Factory(list))
79 79 unknown = attr.ib(default=attr.Factory(list))
80 80 ignored = attr.ib(default=attr.Factory(list))
81 81 clean = attr.ib(default=attr.Factory(list))
82 82
83 83 def __iter__(self):
84 84 yield self.modified
85 85 yield self.added
86 86 yield self.removed
87 87 yield self.deleted
88 88 yield self.unknown
89 89 yield self.ignored
90 90 yield self.clean
91 91
92 92 def __repr__(self):
93 93 return (
94 94 r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
95 95 r'unknown=%s, ignored=%s, clean=%s>'
96 96 ) % tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)
97 97
98 98
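A quick usage sketch of the attrs-based struct above (values hypothetical):

from mercurial.scmutil import status

st = status(modified=[b'a.txt'], added=[b'b.txt'])
# __iter__ yields the seven lists in declaration order:
m, a, r, d, u, i, c = st
assert a == [b'b.txt'] and c == []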
99 99 def itersubrepos(ctx1, ctx2):
100 100 """find subrepos in ctx1 or ctx2"""
101 101 # Create a (subpath, ctx) mapping where we prefer subpaths from
102 102 # ctx1. The subpaths from ctx2 are important when the .hgsub file
103 103 # has been modified (in ctx2) but not yet committed (in ctx1).
104 104 subpaths = dict.fromkeys(ctx2.substate, ctx2)
105 105 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
106 106
107 107 missing = set()
108 108
109 109 for subpath in ctx2.substate:
110 110 if subpath not in ctx1.substate:
111 111 del subpaths[subpath]
112 112 missing.add(subpath)
113 113
114 114 for subpath, ctx in sorted(pycompat.iteritems(subpaths)):
115 115 yield subpath, ctx.sub(subpath)
116 116
117 117 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
118 118 # status and diff will have an accurate result when it does
119 119 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
120 120 # against itself.
121 121 for subpath in missing:
122 122 yield subpath, ctx2.nullsub(subpath, ctx1)
123 123
124 124
125 125 def nochangesfound(ui, repo, excluded=None):
126 126 '''Report no changes for push/pull, excluded is None or a list of
127 127 nodes excluded from the push/pull.
128 128 '''
129 129 secretlist = []
130 130 if excluded:
131 131 for n in excluded:
132 132 ctx = repo[n]
133 133 if ctx.phase() >= phases.secret and not ctx.extinct():
134 134 secretlist.append(n)
135 135
136 136 if secretlist:
137 137 ui.status(
138 138 _(b"no changes found (ignored %d secret changesets)\n")
139 139 % len(secretlist)
140 140 )
141 141 else:
142 142 ui.status(_(b"no changes found\n"))
143 143
144 144
145 145 def callcatch(ui, func):
146 146 """call func() with global exception handling
147 147
148 148 return func() if no exception happens. otherwise do some error handling
149 149 and return an exit code accordingly. does not handle all exceptions.
150 150 """
151 151 try:
152 152 try:
153 153 return func()
154 154 except: # re-raises
155 155 ui.traceback()
156 156 raise
157 157 # Global exception handling, alphabetically
158 158 # Mercurial-specific first, followed by built-in and library exceptions
159 159 except error.LockHeld as inst:
160 160 if inst.errno == errno.ETIMEDOUT:
161 161 reason = _(b'timed out waiting for lock held by %r') % (
162 162 pycompat.bytestr(inst.locker)
163 163 )
164 164 else:
165 165 reason = _(b'lock held by %r') % inst.locker
166 166 ui.error(
167 167 _(b"abort: %s: %s\n")
168 168 % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
169 169 )
170 170 if not inst.locker:
171 171 ui.error(_(b"(lock might be very busy)\n"))
172 172 except error.LockUnavailable as inst:
173 173 ui.error(
174 174 _(b"abort: could not lock %s: %s\n")
175 175 % (
176 176 inst.desc or stringutil.forcebytestr(inst.filename),
177 177 encoding.strtolocal(inst.strerror),
178 178 )
179 179 )
180 180 except error.OutOfBandError as inst:
181 181 if inst.args:
182 182 msg = _(b"abort: remote error:\n")
183 183 else:
184 184 msg = _(b"abort: remote error\n")
185 185 ui.error(msg)
186 186 if inst.args:
187 187 ui.error(b''.join(inst.args))
188 188 if inst.hint:
189 189 ui.error(b'(%s)\n' % inst.hint)
190 190 except error.RepoError as inst:
191 191 ui.error(_(b"abort: %s!\n") % inst)
192 192 if inst.hint:
193 193 ui.error(_(b"(%s)\n") % inst.hint)
194 194 except error.ResponseError as inst:
195 195 ui.error(_(b"abort: %s") % inst.args[0])
196 196 msg = inst.args[1]
197 197 if isinstance(msg, type(u'')):
198 198 msg = pycompat.sysbytes(msg)
199 199 if not isinstance(msg, bytes):
200 200 ui.error(b" %r\n" % (msg,))
201 201 elif not msg:
202 202 ui.error(_(b" empty string\n"))
203 203 else:
204 204 ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
205 205 except error.CensoredNodeError as inst:
206 206 ui.error(_(b"abort: file censored %s!\n") % inst)
207 207 except error.StorageError as inst:
208 208 ui.error(_(b"abort: %s!\n") % inst)
209 209 if inst.hint:
210 210 ui.error(_(b"(%s)\n") % inst.hint)
211 211 except error.InterventionRequired as inst:
212 212 ui.error(b"%s\n" % inst)
213 213 if inst.hint:
214 214 ui.error(_(b"(%s)\n") % inst.hint)
215 215 return 1
216 216 except error.WdirUnsupported:
217 217 ui.error(_(b"abort: working directory revision cannot be specified\n"))
218 218 except error.Abort as inst:
219 219 ui.error(_(b"abort: %s\n") % inst.message)
220 220 if inst.hint:
221 221 ui.error(_(b"(%s)\n") % inst.hint)
222 except error.WorkerError as inst:
223 # Don't print a message -- the worker already should have
224 return inst.status_code
222 225 except ImportError as inst:
223 226 ui.error(_(b"abort: %s!\n") % stringutil.forcebytestr(inst))
224 227 m = stringutil.forcebytestr(inst).split()[-1]
225 228 if m in b"mpatch bdiff".split():
226 229 ui.error(_(b"(did you forget to compile extensions?)\n"))
227 230 elif m in b"zlib".split():
228 231 ui.error(_(b"(is your Python install correct?)\n"))
229 232 except (IOError, OSError) as inst:
230 233 if util.safehasattr(inst, b"code"): # HTTPError
231 234 ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
232 235 elif util.safehasattr(inst, b"reason"): # URLError or SSLError
233 236 try: # usually it is in the form (errno, strerror)
234 237 reason = inst.reason.args[1]
235 238 except (AttributeError, IndexError):
236 239 # it might be anything, for example a string
237 240 reason = inst.reason
238 241 if isinstance(reason, pycompat.unicode):
239 242 # SSLError of Python 2.7.9 contains a unicode
240 243 reason = encoding.unitolocal(reason)
241 244 ui.error(_(b"abort: error: %s\n") % stringutil.forcebytestr(reason))
242 245 elif (
243 246 util.safehasattr(inst, b"args")
244 247 and inst.args
245 248 and inst.args[0] == errno.EPIPE
246 249 ):
247 250 pass
248 251 elif getattr(inst, "strerror", None): # common IOError or OSError
249 252 if getattr(inst, "filename", None) is not None:
250 253 ui.error(
251 254 _(b"abort: %s: '%s'\n")
252 255 % (
253 256 encoding.strtolocal(inst.strerror),
254 257 stringutil.forcebytestr(inst.filename),
255 258 )
256 259 )
257 260 else:
258 261 ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
259 262 else: # suspicious IOError
260 263 raise
261 264 except MemoryError:
262 265 ui.error(_(b"abort: out of memory\n"))
263 266 except SystemExit as inst:
264 267 # Commands shouldn't sys.exit directly, but give a return code.
265 268 # Just in case, catch this and pass the exit code to the caller.
266 269 return inst.code
267 270
268 271 return -1
269 272
270 273
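The new except error.WorkerError branch is the receiving end of the raise sketched at the top of this change: callcatch() swallows the exception without printing anything (the worker already reported its own error) and hands the child's status code back as its return value. Hedged sketch:

from mercurial import error, scmutil

def fail_like_a_worker():
    raise error.WorkerError(255)

# Given a ui object (e.g. from mercurial.ui.ui.load(); assumed here):
# scmutil.callcatch(ui, fail_like_a_worker) == 255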
271 274 def checknewlabel(repo, lbl, kind):
272 275 # Do not use the "kind" parameter in ui output.
273 276 # It makes strings difficult to translate.
274 277 if lbl in [b'tip', b'.', b'null']:
275 278 raise error.Abort(_(b"the name '%s' is reserved") % lbl)
276 279 for c in (b':', b'\0', b'\n', b'\r'):
277 280 if c in lbl:
278 281 raise error.Abort(
279 282 _(b"%r cannot be used in a name") % pycompat.bytestr(c)
280 283 )
281 284 try:
282 285 int(lbl)
283 286 raise error.Abort(_(b"cannot use an integer as a name"))
284 287 except ValueError:
285 288 pass
286 289 if lbl.strip() != lbl:
287 290 raise error.Abort(_(b"leading or trailing whitespace in name %r") % lbl)
288 291
289 292
290 293 def checkfilename(f):
291 294 '''Check that the filename f is an acceptable filename for a tracked file'''
292 295 if b'\r' in f or b'\n' in f:
293 296 raise error.Abort(
294 297 _(b"'\\n' and '\\r' disallowed in filenames: %r")
295 298 % pycompat.bytestr(f)
296 299 )
297 300
298 301
299 302 def checkportable(ui, f):
300 303 '''Check if filename f is portable and warn or abort depending on config'''
301 304 checkfilename(f)
302 305 abort, warn = checkportabilityalert(ui)
303 306 if abort or warn:
304 307 msg = util.checkwinfilename(f)
305 308 if msg:
306 309 msg = b"%s: %s" % (msg, procutil.shellquote(f))
307 310 if abort:
308 311 raise error.Abort(msg)
309 312 ui.warn(_(b"warning: %s\n") % msg)
310 313
311 314
312 315 def checkportabilityalert(ui):
313 316 '''check if the user's config requests nothing, a warning, or abort for
314 317 non-portable filenames'''
315 318 val = ui.config(b'ui', b'portablefilenames')
316 319 lval = val.lower()
317 320 bval = stringutil.parsebool(val)
318 321 abort = pycompat.iswindows or lval == b'abort'
319 322 warn = bval or lval == b'warn'
320 323 if bval is None and not (warn or abort or lval == b'ignore'):
321 324 raise error.ConfigError(
322 325 _(b"ui.portablefilenames value is invalid ('%s')") % val
323 326 )
324 327 return abort, warn
325 328
326 329
327 330 class casecollisionauditor(object):
328 331 def __init__(self, ui, abort, dirstate):
329 332 self._ui = ui
330 333 self._abort = abort
331 334 allfiles = b'\0'.join(dirstate)
332 335 self._loweredfiles = set(encoding.lower(allfiles).split(b'\0'))
333 336 self._dirstate = dirstate
334 337 # The purpose of _newfiles is so that we don't complain about
335 338 # case collisions if someone were to call this object with the
336 339 # same filename twice.
337 340 self._newfiles = set()
338 341
339 342 def __call__(self, f):
340 343 if f in self._newfiles:
341 344 return
342 345 fl = encoding.lower(f)
343 346 if fl in self._loweredfiles and f not in self._dirstate:
344 347 msg = _(b'possible case-folding collision for %s') % f
345 348 if self._abort:
346 349 raise error.Abort(msg)
347 350 self._ui.warn(_(b"warning: %s\n") % msg)
348 351 self._loweredfiles.add(fl)
349 352 self._newfiles.add(f)
350 353
351 354
352 355 def filteredhash(repo, maxrev):
353 356 """build hash of filtered revisions in the current repoview.
354 357
355 358 Multiple caches perform up-to-date validation by checking that the
356 359 tiprev and tipnode stored in the cache file match the current repository.
357 360 However, this is not sufficient for validating repoviews because the set
358 361 of revisions in the view may change without the repository tiprev and
359 362 tipnode changing.
360 363
361 364 This function hashes all the revs filtered from the view and returns
362 365 that SHA-1 digest.
363 366 """
364 367 cl = repo.changelog
365 368 if not cl.filteredrevs:
366 369 return None
367 370 key = cl._filteredrevs_hashcache.get(maxrev)
368 371 if not key:
369 372 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
370 373 if revs:
371 374 s = hashutil.sha1()
372 375 for rev in revs:
373 376 s.update(b'%d;' % rev)
374 377 key = s.digest()
375 378 cl._filteredrevs_hashcache[maxrev] = key
376 379 return key
377 380
378 381
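The cache key is just a SHA-1 over the decimal revnums with b';' separators. Worked out by hand, with hashlib standing in for the hashutil wrapper used above:

import hashlib

revs = [2, 5, 9]  # hypothetical filtered revisions <= maxrev
s = hashlib.sha1()
for rev in revs:
    s.update(b'%d;' % rev)
key = s.digest()  # the 20-byte digest cached per maxrev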
379 382 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
380 383 '''yield every hg repository under path, always recursively.
381 384 The recurse flag will only control recursion into repo working dirs'''
382 385
383 386 def errhandler(err):
384 387 if err.filename == path:
385 388 raise err
386 389
387 390 samestat = getattr(os.path, 'samestat', None)
388 391 if followsym and samestat is not None:
389 392
390 393 def adddir(dirlst, dirname):
391 394 dirstat = os.stat(dirname)
392 395 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
393 396 if not match:
394 397 dirlst.append(dirstat)
395 398 return not match
396 399
397 400 else:
398 401 followsym = False
399 402
400 403 if (seen_dirs is None) and followsym:
401 404 seen_dirs = []
402 405 adddir(seen_dirs, path)
403 406 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
404 407 dirs.sort()
405 408 if b'.hg' in dirs:
406 409 yield root # found a repository
407 410 qroot = os.path.join(root, b'.hg', b'patches')
408 411 if os.path.isdir(os.path.join(qroot, b'.hg')):
409 412 yield qroot # we have a patch queue repo here
410 413 if recurse:
411 414 # avoid recursing inside the .hg directory
412 415 dirs.remove(b'.hg')
413 416 else:
414 417 dirs[:] = [] # don't descend further
415 418 elif followsym:
416 419 newdirs = []
417 420 for d in dirs:
418 421 fname = os.path.join(root, d)
419 422 if adddir(seen_dirs, fname):
420 423 if os.path.islink(fname):
421 424 for hgname in walkrepos(fname, True, seen_dirs):
422 425 yield hgname
423 426 else:
424 427 newdirs.append(d)
425 428 dirs[:] = newdirs
426 429
427 430
428 431 def binnode(ctx):
429 432 """Return binary node id for a given basectx"""
430 433 node = ctx.node()
431 434 if node is None:
432 435 return wdirid
433 436 return node
434 437
435 438
436 439 def intrev(ctx):
437 440 """Return integer for a given basectx that can be used in comparison or
438 441 arithmetic operation"""
439 442 rev = ctx.rev()
440 443 if rev is None:
441 444 return wdirrev
442 445 return rev
443 446
444 447
445 448 def formatchangeid(ctx):
446 449 """Format changectx as '{rev}:{node|formatnode}', which is the default
447 450 template provided by logcmdutil.changesettemplater"""
448 451 repo = ctx.repo()
449 452 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
450 453
451 454
452 455 def formatrevnode(ui, rev, node):
453 456 """Format given revision and node depending on the current verbosity"""
454 457 if ui.debugflag:
455 458 hexfunc = hex
456 459 else:
457 460 hexfunc = short
458 461 return b'%d:%s' % (rev, hexfunc(node))
459 462
460 463
461 464 def resolvehexnodeidprefix(repo, prefix):
462 465 if prefix.startswith(b'x'):
463 466 prefix = prefix[1:]
464 467 try:
465 468 # Uses unfiltered repo because it's faster when prefix is ambiguous.
466 469 # This matches the shortesthexnodeidprefix() function below.
467 470 node = repo.unfiltered().changelog._partialmatch(prefix)
468 471 except error.AmbiguousPrefixLookupError:
469 472 revset = repo.ui.config(
470 473 b'experimental', b'revisions.disambiguatewithin'
471 474 )
472 475 if revset:
473 476 # Clear config to avoid infinite recursion
474 477 configoverrides = {
475 478 (b'experimental', b'revisions.disambiguatewithin'): None
476 479 }
477 480 with repo.ui.configoverride(configoverrides):
478 481 revs = repo.anyrevs([revset], user=True)
479 482 matches = []
480 483 for rev in revs:
481 484 node = repo.changelog.node(rev)
482 485 if hex(node).startswith(prefix):
483 486 matches.append(node)
484 487 if len(matches) == 1:
485 488 return matches[0]
486 489 raise
487 490 if node is None:
488 491 return
489 492 repo.changelog.rev(node) # make sure node isn't filtered
490 493 return node
491 494
492 495
493 496 def mayberevnum(repo, prefix):
494 497 """Checks if the given prefix may be mistaken for a revision number"""
495 498 try:
496 499 i = int(prefix)
497 500 # if we are a pure int, then starting with zero will not be
498 501 # confused as a rev; or, obviously, if the int is larger
499 502 # than the value of the tip rev. We still need to disambiguate if
500 503 # prefix == '0', since that *is* a valid revnum.
501 504 if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
502 505 return False
503 506 return True
504 507 except ValueError:
505 508 return False
506 509
507 510
508 511 def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
509 512 """Find the shortest unambiguous prefix that matches hexnode.
510 513
511 514 If "cache" is not None, it must be a dictionary that can be used for
512 515 caching between calls to this method.
513 516 """
514 517 # _partialmatch() of filtered changelog could take O(len(repo)) time,
515 518 # which would be unacceptably slow, so we look for hash collisions in
516 519 # unfiltered space, which means some hashes may be slightly longer.
517 520
518 521 minlength = max(minlength, 1)
519 522
520 523 def disambiguate(prefix):
521 524 """Disambiguate against revnums."""
522 525 if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'):
523 526 if mayberevnum(repo, prefix):
524 527 return b'x' + prefix
525 528 else:
526 529 return prefix
527 530
528 531 hexnode = hex(node)
529 532 for length in range(len(prefix), len(hexnode) + 1):
530 533 prefix = hexnode[:length]
531 534 if not mayberevnum(repo, prefix):
532 535 return prefix
533 536
534 537 cl = repo.unfiltered().changelog
535 538 revset = repo.ui.config(b'experimental', b'revisions.disambiguatewithin')
536 539 if revset:
537 540 revs = None
538 541 if cache is not None:
539 542 revs = cache.get(b'disambiguationrevset')
540 543 if revs is None:
541 544 revs = repo.anyrevs([revset], user=True)
542 545 if cache is not None:
543 546 cache[b'disambiguationrevset'] = revs
544 547 if cl.rev(node) in revs:
545 548 hexnode = hex(node)
546 549 nodetree = None
547 550 if cache is not None:
548 551 nodetree = cache.get(b'disambiguationnodetree')
549 552 if not nodetree:
550 553 if util.safehasattr(parsers, 'nodetree'):
551 554 # The CExt is the only implementation to provide a nodetree
552 555 # class so far.
553 556 index = cl.index
554 557 if util.safehasattr(index, 'get_cindex'):
555 558 # the rust wrapper needs to give access to its internal index
556 559 index = index.get_cindex()
557 560 nodetree = parsers.nodetree(index, len(revs))
558 561 for r in revs:
559 562 nodetree.insert(r)
560 563 if cache is not None:
561 564 cache[b'disambiguationnodetree'] = nodetree
562 565 if nodetree is not None:
563 566 length = max(nodetree.shortest(node), minlength)
564 567 prefix = hexnode[:length]
565 568 return disambiguate(prefix)
566 569 for length in range(minlength, len(hexnode) + 1):
567 570 matches = []
568 571 prefix = hexnode[:length]
569 572 for rev in revs:
570 573 otherhexnode = repo[rev].hex()
571 574 if prefix == otherhexnode[:length]:
572 575 matches.append(otherhexnode)
573 576 if len(matches) == 1:
574 577 return disambiguate(prefix)
575 578
576 579 try:
577 580 return disambiguate(cl.shortest(node, minlength))
578 581 except error.LookupError:
579 582 raise error.RepoLookupError()
580 583
581 584
582 585 def isrevsymbol(repo, symbol):
583 586 """Checks if a symbol exists in the repo.
584 587
585 588 See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
586 589 symbol is an ambiguous nodeid prefix.
587 590 """
588 591 try:
589 592 revsymbol(repo, symbol)
590 593 return True
591 594 except error.RepoLookupError:
592 595 return False
593 596
594 597
595 598 def revsymbol(repo, symbol):
596 599 """Returns a context given a single revision symbol (as string).
597 600
598 601 This is similar to revsingle(), but accepts only a single revision symbol,
599 602 i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
600 603 not "max(public())".
601 604 """
602 605 if not isinstance(symbol, bytes):
603 606 msg = (
604 607 b"symbol (%s of type %s) was not a string, did you mean "
605 608 b"repo[symbol]?" % (symbol, type(symbol))
606 609 )
607 610 raise error.ProgrammingError(msg)
608 611 try:
609 612 if symbol in (b'.', b'tip', b'null'):
610 613 return repo[symbol]
611 614
612 615 try:
613 616 r = int(symbol)
614 617 if b'%d' % r != symbol:
615 618 raise ValueError
616 619 l = len(repo.changelog)
617 620 if r < 0:
618 621 r += l
619 622 if r < 0 or r >= l and r != wdirrev:
620 623 raise ValueError
621 624 return repo[r]
622 625 except error.FilteredIndexError:
623 626 raise
624 627 except (ValueError, OverflowError, IndexError):
625 628 pass
626 629
627 630 if len(symbol) == 40:
628 631 try:
629 632 node = bin(symbol)
630 633 rev = repo.changelog.rev(node)
631 634 return repo[rev]
632 635 except error.FilteredLookupError:
633 636 raise
634 637 except (TypeError, LookupError):
635 638 pass
636 639
637 640 # look up bookmarks through the name interface
638 641 try:
639 642 node = repo.names.singlenode(repo, symbol)
640 643 rev = repo.changelog.rev(node)
641 644 return repo[rev]
642 645 except KeyError:
643 646 pass
644 647
645 648 node = resolvehexnodeidprefix(repo, symbol)
646 649 if node is not None:
647 650 rev = repo.changelog.rev(node)
648 651 return repo[rev]
649 652
650 653 raise error.RepoLookupError(_(b"unknown revision '%s'") % symbol)
651 654
652 655 except error.WdirUnsupported:
653 656 return repo[None]
654 657 except (
655 658 error.FilteredIndexError,
656 659 error.FilteredLookupError,
657 660 error.FilteredRepoLookupError,
658 661 ):
659 662 raise _filterederror(repo, symbol)
660 663
661 664
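Usage sketch contrasting what revsymbol() accepts and rejects (repo is assumed to be a localrepository):

from mercurial import scmutil

# ctx = scmutil.revsymbol(repo, b'tip')       # symbols and nodeid prefixes work
# scmutil.revsymbol(repo, u'tip')             # ProgrammingError: must be bytes
# scmutil.revsymbol(repo, b'max(public())')   # RepoLookupError: revsets need revrange()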
662 665 def _filterederror(repo, changeid):
663 666 """build an exception to be raised about a filtered changeid
664 667
665 668 This is extracted in a function to help extensions (eg: evolve) to
666 669 experiment with various message variants."""
667 670 if repo.filtername.startswith(b'visible'):
668 671
669 672 # Check if the changeset is obsolete
670 673 unfilteredrepo = repo.unfiltered()
671 674 ctx = revsymbol(unfilteredrepo, changeid)
672 675
673 676 # If the changeset is obsolete, enrich the message with the reason
674 677 # that made this changeset not visible
675 678 if ctx.obsolete():
676 679 msg = obsutil._getfilteredreason(repo, changeid, ctx)
677 680 else:
678 681 msg = _(b"hidden revision '%s'") % changeid
679 682
680 683 hint = _(b'use --hidden to access hidden revisions')
681 684
682 685 return error.FilteredRepoLookupError(msg, hint=hint)
683 686 msg = _(b"filtered revision '%s' (not in '%s' subset)")
684 687 msg %= (changeid, repo.filtername)
685 688 return error.FilteredRepoLookupError(msg)
686 689
687 690
688 691 def revsingle(repo, revspec, default=b'.', localalias=None):
689 692 if not revspec and revspec != 0:
690 693 return repo[default]
691 694
692 695 l = revrange(repo, [revspec], localalias=localalias)
693 696 if not l:
694 697 raise error.Abort(_(b'empty revision set'))
695 698 return repo[l.last()]
696 699
697 700
698 701 def _pairspec(revspec):
699 702 tree = revsetlang.parse(revspec)
700 703 return tree and tree[0] in (
701 704 b'range',
702 705 b'rangepre',
703 706 b'rangepost',
704 707 b'rangeall',
705 708 )
706 709
707 710
708 711 def revpair(repo, revs):
709 712 if not revs:
710 713 return repo[b'.'], repo[None]
711 714
712 715 l = revrange(repo, revs)
713 716
714 717 if not l:
715 718 raise error.Abort(_(b'empty revision range'))
716 719
717 720 first = l.first()
718 721 second = l.last()
719 722
720 723 if (
721 724 first == second
722 725 and len(revs) >= 2
723 726 and not all(revrange(repo, [r]) for r in revs)
724 727 ):
725 728 raise error.Abort(_(b'empty revision on one side of range'))
726 729
727 730 # if top-level is range expression, the result must always be a pair
728 731 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
729 732 return repo[first], repo[None]
730 733
731 734 return repo[first], repo[second]
732 735
733 736
734 737 def revrange(repo, specs, localalias=None):
735 738 """Execute 1 to many revsets and return the union.
736 739
737 740 This is the preferred mechanism for executing revsets using user-specified
738 741 config options, such as revset aliases.
739 742
740 743 The revsets specified by ``specs`` will be executed via a chained ``OR``
741 744 expression. If ``specs`` is empty, an empty result is returned.
742 745
743 746 ``specs`` can contain integers, in which case they are assumed to be
744 747 revision numbers.
745 748
746 749 It is assumed the revsets are already formatted. If you have arguments
747 750 that need to be expanded in the revset, call ``revsetlang.formatspec()``
748 751 and pass the result as an element of ``specs``.
749 752
750 753 Specifying a single revset is allowed.
751 754
752 755 Returns a ``smartset.abstractsmartset`` which is a list-like interface over
753 756 integer revisions.
754 757 """
755 758 allspecs = []
756 759 for spec in specs:
757 760 if isinstance(spec, int):
758 761 spec = revsetlang.formatspec(b'%d', spec)
759 762 allspecs.append(spec)
760 763 return repo.anyrevs(allspecs, user=True, localalias=localalias)
761 764
762 765
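Usage sketch: specs are OR'd together and bare integers are taken as revision numbers (repo assumed):

from mercurial import scmutil

# revs = scmutil.revrange(repo, [b'heads(default)', 42, b'.^'])
# 'revs' is a smartset: iterable over ints, with first()/last(), etc.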
763 766 def increasingwindows(windowsize=8, sizelimit=512):
764 767 while True:
765 768 yield windowsize
766 769 if windowsize < sizelimit:
767 770 windowsize *= 2
768 771
769 772
770 773 def walkchangerevs(repo, revs, makefilematcher, prepare):
771 774 '''Iterate over files and the revs in a "windowed" way.
772 775
773 776 Callers most commonly need to iterate backwards over the history
774 777 in which they are interested. Doing so has awful (quadratic-looking)
775 778 performance, so we use iterators in a "windowed" way.
776 779
777 780 We walk a window of revisions in the desired order. Within the
778 781 window, we first walk forwards to gather data, then in the desired
779 782 order (usually backwards) to display it.
780 783
781 784 This function returns an iterator yielding contexts. Before
782 785 yielding each context, the iterator will first call the prepare
783 786 function on each context in the window in forward order.'''
784 787
785 788 if not revs:
786 789 return []
787 790 change = repo.__getitem__
788 791
789 792 def iterate():
790 793 it = iter(revs)
791 794 stopiteration = False
792 795 for windowsize in increasingwindows():
793 796 nrevs = []
794 797 for i in pycompat.xrange(windowsize):
795 798 rev = next(it, None)
796 799 if rev is None:
797 800 stopiteration = True
798 801 break
799 802 nrevs.append(rev)
800 803 for rev in sorted(nrevs):
801 804 ctx = change(rev)
802 805 prepare(ctx, makefilematcher(ctx))
803 806 for rev in nrevs:
804 807 yield change(rev)
805 808
806 809 if stopiteration:
807 810 break
808 811
809 812 return iterate()
810 813
811 814
812 815 def meaningfulparents(repo, ctx):
813 816 """Return list of meaningful (or all if debug) parentrevs for rev.
814 817
815 818 For merges (two non-nullrev revisions) both parents are meaningful.
816 819 Otherwise the first parent revision is considered meaningful if it
817 820 is not the preceding revision.
818 821 """
819 822 parents = ctx.parents()
820 823 if len(parents) > 1:
821 824 return parents
822 825 if repo.ui.debugflag:
823 826 return [parents[0], repo[nullrev]]
824 827 if parents[0].rev() >= intrev(ctx) - 1:
825 828 return []
826 829 return parents
827 830
828 831
829 832 def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
830 833 """Return a function that produces paths for presenting to the user.
831 834
832 835 The returned function takes a repo-relative path and produces a path
833 836 that can be presented in the UI.
834 837
835 838 Depending on the value of ui.relative-paths, either a repo-relative or
836 839 cwd-relative path will be produced.
837 840
838 841 legacyrelativevalue is the value to use if ui.relative-paths=legacy
839 842
840 843 If forcerelativevalue is not None, then that value will be used regardless
841 844 of what ui.relative-paths is set to.
842 845 """
843 846 if forcerelativevalue is not None:
844 847 relative = forcerelativevalue
845 848 else:
846 849 config = repo.ui.config(b'ui', b'relative-paths')
847 850 if config == b'legacy':
848 851 relative = legacyrelativevalue
849 852 else:
850 853 relative = stringutil.parsebool(config)
851 854 if relative is None:
852 855 raise error.ConfigError(
853 856 _(b"ui.relative-paths is not a boolean ('%s')") % config
854 857 )
855 858
856 859 if relative:
857 860 cwd = repo.getcwd()
858 861 if cwd != b'':
859 862 # this branch would work even if cwd == b'' (ie cwd = repo
860 863 # root), but its generality makes the returned function slower
861 864 pathto = repo.pathto
862 865 return lambda f: pathto(f, cwd)
863 866 if repo.ui.configbool(b'ui', b'slash'):
864 867 return lambda f: f
865 868 else:
866 869 return util.localpath
867 870
868 871
869 872 def subdiruipathfn(subpath, uipathfn):
870 873 '''Create a new uipathfn that treats the file as relative to subpath.'''
871 874 return lambda f: uipathfn(posixpath.join(subpath, f))
872 875
873 876
874 877 def anypats(pats, opts):
875 878 '''Checks if any patterns, including --include and --exclude were given.
876 879
877 880 Some commands (e.g. addremove) use this condition for deciding whether to
878 881 print absolute or relative paths.
879 882 '''
880 883 return bool(pats or opts.get(b'include') or opts.get(b'exclude'))
881 884
882 885
883 886 def expandpats(pats):
884 887 '''Expand bare globs when running on windows.
885 888 On posix we assume it has already been done by sh.'''
886 889 if not util.expandglobs:
887 890 return list(pats)
888 891 ret = []
889 892 for kindpat in pats:
890 893 kind, pat = matchmod._patsplit(kindpat, None)
891 894 if kind is None:
892 895 try:
893 896 globbed = glob.glob(pat)
894 897 except re.error:
895 898 globbed = [pat]
896 899 if globbed:
897 900 ret.extend(globbed)
898 901 continue
899 902 ret.append(kindpat)
900 903 return ret
901 904
902 905
903 906 def matchandpats(
904 907 ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
905 908 ):
906 909 '''Return a matcher and the patterns that were used.
907 910 The matcher will warn about bad matches, unless an alternate badfn callback
908 911 is provided.'''
909 912 if opts is None:
910 913 opts = {}
911 914 if not globbed and default == b'relpath':
912 915 pats = expandpats(pats or [])
913 916
914 917 uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)
915 918
916 919 def bad(f, msg):
917 920 ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))
918 921
919 922 if badfn is None:
920 923 badfn = bad
921 924
922 925 m = ctx.match(
923 926 pats,
924 927 opts.get(b'include'),
925 928 opts.get(b'exclude'),
926 929 default,
927 930 listsubrepos=opts.get(b'subrepos'),
928 931 badfn=badfn,
929 932 )
930 933
931 934 if m.always():
932 935 pats = []
933 936 return m, pats
934 937
935 938
936 939 def match(
937 940 ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
938 941 ):
939 942 '''Return a matcher that will warn about bad matches.'''
940 943 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
941 944
942 945
943 946 def matchall(repo):
944 947 '''Return a matcher that will efficiently match everything.'''
945 948 return matchmod.always()
946 949
947 950
948 951 def matchfiles(repo, files, badfn=None):
949 952 '''Return a matcher that will efficiently match exactly these files.'''
950 953 return matchmod.exact(files, badfn=badfn)
951 954
952 955
953 956 def parsefollowlinespattern(repo, rev, pat, msg):
954 957 """Return a file name from `pat` pattern suitable for usage in followlines
955 958 logic.
956 959 """
957 960 if not matchmod.patkind(pat):
958 961 return pathutil.canonpath(repo.root, repo.getcwd(), pat)
959 962 else:
960 963 ctx = repo[rev]
961 964 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
962 965 files = [f for f in ctx if m(f)]
963 966 if len(files) != 1:
964 967 raise error.ParseError(msg)
965 968 return files[0]
966 969
967 970
968 971 def getorigvfs(ui, repo):
969 972 """return a vfs suitable to save 'orig' file
970 973
971 974 return None if no special directory is configured"""
972 975 origbackuppath = ui.config(b'ui', b'origbackuppath')
973 976 if not origbackuppath:
974 977 return None
975 978 return vfs.vfs(repo.wvfs.join(origbackuppath))
976 979
977 980
978 981 def backuppath(ui, repo, filepath):
979 982 '''customize where working copy backup files (.orig files) are created
980 983
981 984 Fetch user defined path from config file: [ui] origbackuppath = <path>
982 985 Fall back to default (filepath with .orig suffix) if not specified
983 986
984 987 filepath is repo-relative
985 988
986 989 Returns an absolute path
987 990 '''
988 991 origvfs = getorigvfs(ui, repo)
989 992 if origvfs is None:
990 993 return repo.wjoin(filepath + b".orig")
991 994
992 995 origbackupdir = origvfs.dirname(filepath)
993 996 if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
994 997 ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))
995 998
996 999 # Remove any files that conflict with the backup file's path
997 1000 for f in reversed(list(pathutil.finddirs(filepath))):
998 1001 if origvfs.isfileorlink(f):
999 1002 ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(f))
1000 1003 origvfs.unlink(f)
1001 1004 break
1002 1005
1003 1006 origvfs.makedirs(origbackupdir)
1004 1007
1005 1008 if origvfs.isdir(filepath) and not origvfs.islink(filepath):
1006 1009 ui.note(
1007 1010 _(b'removing conflicting directory: %s\n') % origvfs.join(filepath)
1008 1011 )
1009 1012 origvfs.rmtree(filepath, forcibly=True)
1010 1013
1011 1014 return origvfs.join(filepath)
1012 1015
1013 1016
1014 1017 class _containsnode(object):
1015 1018 """proxy __contains__(node) to container.__contains__ which accepts revs"""
1016 1019
1017 1020 def __init__(self, repo, revcontainer):
1018 1021 self._torev = repo.changelog.rev
1019 1022 self._revcontains = revcontainer.__contains__
1020 1023
1021 1024 def __contains__(self, node):
1022 1025 return self._revcontains(self._torev(node))
1023 1026
1024 1027
1025 1028 def cleanupnodes(
1026 1029 repo,
1027 1030 replacements,
1028 1031 operation,
1029 1032 moves=None,
1030 1033 metadata=None,
1031 1034 fixphase=False,
1032 1035 targetphase=None,
1033 1036 backup=True,
1034 1037 ):
1035 1038 """do common cleanups when old nodes are replaced by new nodes
1036 1039
1037 1040 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
1038 1041 (we might also want to move working directory parent in the future)
1039 1042
1040 1043 By default, bookmark moves are calculated automatically from 'replacements',
1041 1044 but 'moves' can be used to override that. Also, 'moves' may include
1042 1045 additional bookmark moves that should not have associated obsmarkers.
1043 1046
1044 1047 replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
1045 1048 have replacements. operation is a string, like "rebase".
1046 1049
1047 1050 metadata is a dictionary containing metadata to be stored in obsmarker if
1048 1051 obsolescence is enabled.
1049 1052 """
1050 1053 assert fixphase or targetphase is None
1051 1054 if not replacements and not moves:
1052 1055 return
1053 1056
1054 1057 # translate mapping's other forms
1055 1058 if not util.safehasattr(replacements, b'items'):
1056 1059 replacements = {(n,): () for n in replacements}
1057 1060 else:
1058 1061 # upgrading non-tuple "source" to tuple ones for BC
1059 1062 repls = {}
1060 1063 for key, value in replacements.items():
1061 1064 if not isinstance(key, tuple):
1062 1065 key = (key,)
1063 1066 repls[key] = value
1064 1067 replacements = repls
1065 1068
1066 1069 # Unfiltered repo is needed since nodes in replacements might be hidden.
1067 1070 unfi = repo.unfiltered()
1068 1071
1069 1072 # Calculate bookmark movements
1070 1073 if moves is None:
1071 1074 moves = {}
1072 1075 for oldnodes, newnodes in replacements.items():
1073 1076 for oldnode in oldnodes:
1074 1077 if oldnode in moves:
1075 1078 continue
1076 1079 if len(newnodes) > 1:
1077 1080 # usually a split, take the one with biggest rev number
1078 1081 newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
1079 1082 elif len(newnodes) == 0:
1080 1083 # move bookmark backwards
1081 1084 allreplaced = []
1082 1085 for rep in replacements:
1083 1086 allreplaced.extend(rep)
1084 1087 roots = list(
1085 1088 unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
1086 1089 )
1087 1090 if roots:
1088 1091 newnode = roots[0].node()
1089 1092 else:
1090 1093 newnode = nullid
1091 1094 else:
1092 1095 newnode = newnodes[0]
1093 1096 moves[oldnode] = newnode
1094 1097
1095 1098 allnewnodes = [n for ns in replacements.values() for n in ns]
1096 1099 toretract = {}
1097 1100 toadvance = {}
1098 1101 if fixphase:
1099 1102 precursors = {}
1100 1103 for oldnodes, newnodes in replacements.items():
1101 1104 for oldnode in oldnodes:
1102 1105 for newnode in newnodes:
1103 1106 precursors.setdefault(newnode, []).append(oldnode)
1104 1107
1105 1108 allnewnodes.sort(key=lambda n: unfi[n].rev())
1106 1109 newphases = {}
1107 1110
1108 1111 def phase(ctx):
1109 1112 return newphases.get(ctx.node(), ctx.phase())
1110 1113
1111 1114 for newnode in allnewnodes:
1112 1115 ctx = unfi[newnode]
1113 1116 parentphase = max(phase(p) for p in ctx.parents())
1114 1117 if targetphase is None:
1115 1118 oldphase = max(
1116 1119 unfi[oldnode].phase() for oldnode in precursors[newnode]
1117 1120 )
1118 1121 newphase = max(oldphase, parentphase)
1119 1122 else:
1120 1123 newphase = max(targetphase, parentphase)
1121 1124 newphases[newnode] = newphase
1122 1125 if newphase > ctx.phase():
1123 1126 toretract.setdefault(newphase, []).append(newnode)
1124 1127 elif newphase < ctx.phase():
1125 1128 toadvance.setdefault(newphase, []).append(newnode)
1126 1129
1127 1130 with repo.transaction(b'cleanup') as tr:
1128 1131 # Move bookmarks
1129 1132 bmarks = repo._bookmarks
1130 1133 bmarkchanges = []
1131 1134 for oldnode, newnode in moves.items():
1132 1135 oldbmarks = repo.nodebookmarks(oldnode)
1133 1136 if not oldbmarks:
1134 1137 continue
1135 1138 from . import bookmarks # avoid import cycle
1136 1139
1137 1140 repo.ui.debug(
1138 1141 b'moving bookmarks %r from %s to %s\n'
1139 1142 % (
1140 1143 pycompat.rapply(pycompat.maybebytestr, oldbmarks),
1141 1144 hex(oldnode),
1142 1145 hex(newnode),
1143 1146 )
1144 1147 )
1145 1148 # Delete divergent bookmarks being parents of related newnodes
1146 1149 deleterevs = repo.revs(
1147 1150 b'parents(roots(%ln & (::%n))) - parents(%n)',
1148 1151 allnewnodes,
1149 1152 newnode,
1150 1153 oldnode,
1151 1154 )
1152 1155 deletenodes = _containsnode(repo, deleterevs)
1153 1156 for name in oldbmarks:
1154 1157 bmarkchanges.append((name, newnode))
1155 1158 for b in bookmarks.divergent2delete(repo, deletenodes, name):
1156 1159 bmarkchanges.append((b, None))
1157 1160
1158 1161 if bmarkchanges:
1159 1162 bmarks.applychanges(repo, tr, bmarkchanges)
1160 1163
1161 1164 for phase, nodes in toretract.items():
1162 1165 phases.retractboundary(repo, tr, phase, nodes)
1163 1166 for phase, nodes in toadvance.items():
1164 1167 phases.advanceboundary(repo, tr, phase, nodes)
1165 1168
1166 1169 mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
1167 1170 # Obsolete or strip nodes
1168 1171 if obsolete.isenabled(repo, obsolete.createmarkersopt):
1169 1172 # If a node is already obsoleted, and we want to obsolete it
1170 1173 # without a successor, skip that obsolete request since it's
1171 1174 # unnecessary. That's the "if s or not isobs(n)" check below.
1172 1175 # Also sort the nodes in topological order; that might be useful for
1173 1176 # some obsstore logic.
1174 1177 # NOTE: the sorting might belong to createmarkers.
1175 1178 torev = unfi.changelog.rev
1176 1179 sortfunc = lambda ns: torev(ns[0][0])
1177 1180 rels = []
1178 1181 for ns, s in sorted(replacements.items(), key=sortfunc):
1179 1182 rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
1180 1183 rels.append(rel)
1181 1184 if rels:
1182 1185 obsolete.createmarkers(
1183 1186 repo, rels, operation=operation, metadata=metadata
1184 1187 )
1185 1188 elif phases.supportinternal(repo) and mayusearchived:
1186 1189 # this assumes we do not have "unstable" nodes above the cleaned ones
1187 1190 allreplaced = set()
1188 1191 for ns in replacements.keys():
1189 1192 allreplaced.update(ns)
1190 1193 if backup:
1191 1194 from . import repair # avoid import cycle
1192 1195
1193 1196 node = min(allreplaced, key=repo.changelog.rev)
1194 1197 repair.backupbundle(
1195 1198 repo, allreplaced, allreplaced, node, operation
1196 1199 )
1197 1200 phases.retractboundary(repo, tr, phases.archived, allreplaced)
1198 1201 else:
1199 1202 from . import repair # avoid import cycle
1200 1203
1201 1204 tostrip = list(n for ns in replacements for n in ns)
1202 1205 if tostrip:
1203 1206 repair.delayedstrip(
1204 1207 repo.ui, repo, tostrip, operation, backup=backup
1205 1208 )
1206 1209
1207 1210
1208 1211 def addremove(repo, matcher, prefix, uipathfn, opts=None):
1209 1212 if opts is None:
1210 1213 opts = {}
1211 1214 m = matcher
1212 1215 dry_run = opts.get(b'dry_run')
1213 1216 try:
1214 1217 similarity = float(opts.get(b'similarity') or 0)
1215 1218 except ValueError:
1216 1219 raise error.Abort(_(b'similarity must be a number'))
1217 1220 if similarity < 0 or similarity > 100:
1218 1221 raise error.Abort(_(b'similarity must be between 0 and 100'))
1219 1222 similarity /= 100.0
1220 1223
1221 1224 ret = 0
1222 1225
1223 1226 wctx = repo[None]
1224 1227 for subpath in sorted(wctx.substate):
1225 1228 submatch = matchmod.subdirmatcher(subpath, m)
1226 1229 if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
1227 1230 sub = wctx.sub(subpath)
1228 1231 subprefix = repo.wvfs.reljoin(prefix, subpath)
1229 1232 subuipathfn = subdiruipathfn(subpath, uipathfn)
1230 1233 try:
1231 1234 if sub.addremove(submatch, subprefix, subuipathfn, opts):
1232 1235 ret = 1
1233 1236 except error.LookupError:
1234 1237 repo.ui.status(
1235 1238 _(b"skipping missing subrepository: %s\n")
1236 1239 % uipathfn(subpath)
1237 1240 )
1238 1241
1239 1242 rejected = []
1240 1243
1241 1244 def badfn(f, msg):
1242 1245 if f in m.files():
1243 1246 m.bad(f, msg)
1244 1247 rejected.append(f)
1245 1248
1246 1249 badmatch = matchmod.badmatch(m, badfn)
1247 1250 added, unknown, deleted, removed, forgotten = _interestingfiles(
1248 1251 repo, badmatch
1249 1252 )
1250 1253
1251 1254 unknownset = set(unknown + forgotten)
1252 1255 toprint = unknownset.copy()
1253 1256 toprint.update(deleted)
1254 1257 for abs in sorted(toprint):
1255 1258 if repo.ui.verbose or not m.exact(abs):
1256 1259 if abs in unknownset:
1257 1260 status = _(b'adding %s\n') % uipathfn(abs)
1258 1261 label = b'ui.addremove.added'
1259 1262 else:
1260 1263 status = _(b'removing %s\n') % uipathfn(abs)
1261 1264 label = b'ui.addremove.removed'
1262 1265 repo.ui.status(status, label=label)
1263 1266
1264 1267 renames = _findrenames(
1265 1268 repo, m, added + unknown, removed + deleted, similarity, uipathfn
1266 1269 )
1267 1270
1268 1271 if not dry_run:
1269 1272 _markchanges(repo, unknown + forgotten, deleted, renames)
1270 1273
1271 1274 for f in rejected:
1272 1275 if f in m.files():
1273 1276 return 1
1274 1277 return ret
1275 1278
1276 1279
1277 1280 def marktouched(repo, files, similarity=0.0):
1278 1281 '''Assert that files have somehow been operated upon. files are relative to
1279 1282 the repo root.'''
1280 1283 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
1281 1284 rejected = []
1282 1285
1283 1286 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
1284 1287
1285 1288 if repo.ui.verbose:
1286 1289 unknownset = set(unknown + forgotten)
1287 1290 toprint = unknownset.copy()
1288 1291 toprint.update(deleted)
1289 1292 for abs in sorted(toprint):
1290 1293 if abs in unknownset:
1291 1294 status = _(b'adding %s\n') % abs
1292 1295 else:
1293 1296 status = _(b'removing %s\n') % abs
1294 1297 repo.ui.status(status)
1295 1298
1296 1299 # TODO: We should probably have the caller pass in uipathfn and apply it to
1297 1300 # the messages above too. legacyrelativevalue=True is consistent with how
1298 1301 # it used to work.
1299 1302 uipathfn = getuipathfn(repo, legacyrelativevalue=True)
1300 1303 renames = _findrenames(
1301 1304 repo, m, added + unknown, removed + deleted, similarity, uipathfn
1302 1305 )
1303 1306
1304 1307 _markchanges(repo, unknown + forgotten, deleted, renames)
1305 1308
1306 1309 for f in rejected:
1307 1310 if f in m.files():
1308 1311 return 1
1309 1312 return 0
1310 1313
1311 1314
1312 1315 def _interestingfiles(repo, matcher):
1313 1316 '''Walk dirstate with matcher, looking for files that addremove would care
1314 1317 about.
1315 1318
1316 1319 This is different from dirstate.status because it doesn't care about
1317 1320 whether files are modified or clean.'''
1318 1321 added, unknown, deleted, removed, forgotten = [], [], [], [], []
1319 1322 audit_path = pathutil.pathauditor(repo.root, cached=True)
1320 1323
1321 1324 ctx = repo[None]
1322 1325 dirstate = repo.dirstate
1323 1326 matcher = repo.narrowmatch(matcher, includeexact=True)
1324 1327 walkresults = dirstate.walk(
1325 1328 matcher,
1326 1329 subrepos=sorted(ctx.substate),
1327 1330 unknown=True,
1328 1331 ignored=False,
1329 1332 full=False,
1330 1333 )
1331 1334 for abs, st in pycompat.iteritems(walkresults):
1332 1335 dstate = dirstate[abs]
1333 1336 if dstate == b'?' and audit_path.check(abs):
1334 1337 unknown.append(abs)
1335 1338 elif dstate != b'r' and not st:
1336 1339 deleted.append(abs)
1337 1340 elif dstate == b'r' and st:
1338 1341 forgotten.append(abs)
1339 1342 # for finding renames
1340 1343 elif dstate == b'r' and not st:
1341 1344 removed.append(abs)
1342 1345 elif dstate == b'a':
1343 1346 added.append(abs)
1344 1347
1345 1348 return added, unknown, deleted, removed, forgotten
1346 1349
1347 1350
1348 1351 def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
1349 1352 '''Find renames from removed files to added ones.'''
1350 1353 renames = {}
1351 1354 if similarity > 0:
1352 1355 for old, new, score in similar.findrenames(
1353 1356 repo, added, removed, similarity
1354 1357 ):
1355 1358 if (
1356 1359 repo.ui.verbose
1357 1360 or not matcher.exact(old)
1358 1361 or not matcher.exact(new)
1359 1362 ):
1360 1363 repo.ui.status(
1361 1364 _(
1362 1365 b'recording removal of %s as rename to %s '
1363 1366 b'(%d%% similar)\n'
1364 1367 )
1365 1368 % (uipathfn(old), uipathfn(new), score * 100)
1366 1369 )
1367 1370 renames[new] = old
1368 1371 return renames
1369 1372
1370 1373
1371 1374 def _markchanges(repo, unknown, deleted, renames):
1372 1375 '''Marks the files in unknown as added, the files in deleted as removed,
1373 1376 and the files in renames as copied.'''
1374 1377 wctx = repo[None]
1375 1378 with repo.wlock():
1376 1379 wctx.forget(deleted)
1377 1380 wctx.add(unknown)
1378 1381 for new, old in pycompat.iteritems(renames):
1379 1382 wctx.copy(old, new)
1380 1383
1381 1384
1382 1385 def getrenamedfn(repo, endrev=None):
1383 1386 if copiesmod.usechangesetcentricalgo(repo):
1384 1387
1385 1388 def getrenamed(fn, rev):
1386 1389 ctx = repo[rev]
1387 1390 p1copies = ctx.p1copies()
1388 1391 if fn in p1copies:
1389 1392 return p1copies[fn]
1390 1393 p2copies = ctx.p2copies()
1391 1394 if fn in p2copies:
1392 1395 return p2copies[fn]
1393 1396 return None
1394 1397
1395 1398 return getrenamed
1396 1399
1397 1400 rcache = {}
1398 1401 if endrev is None:
1399 1402 endrev = len(repo)
1400 1403
1401 1404 def getrenamed(fn, rev):
1402 1405 '''looks up all renames for a file (up to endrev) the first
1403 1406 time the file is given. It indexes on the changerev and only
1404 1407 parses the manifest if linkrev != changerev.
1405 1408 Returns rename info for fn at changerev rev.'''
1406 1409 if fn not in rcache:
1407 1410 rcache[fn] = {}
1408 1411 fl = repo.file(fn)
1409 1412 for i in fl:
1410 1413 lr = fl.linkrev(i)
1411 1414 renamed = fl.renamed(fl.node(i))
1412 1415 rcache[fn][lr] = renamed and renamed[0]
1413 1416 if lr >= endrev:
1414 1417 break
1415 1418 if rev in rcache[fn]:
1416 1419 return rcache[fn][rev]
1417 1420
1418 1421 # If linkrev != rev (i.e. rev not found in rcache) fallback to
1419 1422 # filectx logic.
1420 1423 try:
1421 1424 return repo[rev][fn].copysource()
1422 1425 except error.LookupError:
1423 1426 return None
1424 1427
1425 1428 return getrenamed
1426 1429
1427 1430
1428 1431 def getcopiesfn(repo, endrev=None):
1429 1432 if copiesmod.usechangesetcentricalgo(repo):
1430 1433
1431 1434 def copiesfn(ctx):
1432 1435 if ctx.p2copies():
1433 1436 allcopies = ctx.p1copies().copy()
1434 1437 # There should be no overlap
1435 1438 allcopies.update(ctx.p2copies())
1436 1439 return sorted(allcopies.items())
1437 1440 else:
1438 1441 return sorted(ctx.p1copies().items())
1439 1442
1440 1443 else:
1441 1444 getrenamed = getrenamedfn(repo, endrev)
1442 1445
1443 1446 def copiesfn(ctx):
1444 1447 copies = []
1445 1448 for fn in ctx.files():
1446 1449 rename = getrenamed(fn, ctx.rev())
1447 1450 if rename:
1448 1451 copies.append((fn, rename))
1449 1452 return copies
1450 1453
1451 1454 return copiesfn
1452 1455
1453 1456
1454 1457 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1455 1458 """Update the dirstate to reflect the intent of copying src to dst. For
1456 1459 different reasons it might not end with dst being marked as copied from src.
1457 1460 """
1458 1461 origsrc = repo.dirstate.copied(src) or src
1459 1462 if dst == origsrc: # copying back a copy?
1460 1463 if repo.dirstate[dst] not in b'mn' and not dryrun:
1461 1464 repo.dirstate.normallookup(dst)
1462 1465 else:
1463 1466 if repo.dirstate[origsrc] == b'a' and origsrc == src:
1464 1467 if not ui.quiet:
1465 1468 ui.warn(
1466 1469 _(
1467 1470 b"%s has not been committed yet, so no copy "
1468 1471 b"data will be stored for %s.\n"
1469 1472 )
1470 1473 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
1471 1474 )
1472 1475 if repo.dirstate[dst] in b'?r' and not dryrun:
1473 1476 wctx.add([dst])
1474 1477 elif not dryrun:
1475 1478 wctx.copy(origsrc, dst)
1476 1479
1477 1480
1478 1481 def movedirstate(repo, newctx, match=None):
1479 1482 """Move the dirstate to newctx and adjust it as necessary.
1480 1483
1481 1484 A matcher can be provided as an optimization. It is probably a bug to pass
1482 1485 a matcher that doesn't match all the differences between the parent of the
1483 1486 working copy and newctx.
1484 1487 """
1485 1488 oldctx = repo[b'.']
1486 1489 ds = repo.dirstate
1487 1490 copies = dict(ds.copies())
1488 1491 ds.setparents(newctx.node(), nullid)
1489 1492 s = newctx.status(oldctx, match=match)
1490 1493 for f in s.modified:
1491 1494 if ds[f] == b'r':
1492 1495 # modified + removed -> removed
1493 1496 continue
1494 1497 ds.normallookup(f)
1495 1498
1496 1499 for f in s.added:
1497 1500 if ds[f] == b'r':
1498 1501 # added + removed -> unknown
1499 1502 ds.drop(f)
1500 1503 elif ds[f] != b'a':
1501 1504 ds.add(f)
1502 1505
1503 1506 for f in s.removed:
1504 1507 if ds[f] == b'a':
1505 1508 # removed + added -> normal
1506 1509 ds.normallookup(f)
1507 1510 elif ds[f] != b'r':
1508 1511 ds.remove(f)
1509 1512
1510 1513 # Merge old parent and old working dir copies
1511 1514 oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
1512 1515 oldcopies.update(copies)
1513 1516 copies = {
1514 1517 dst: oldcopies.get(src, src)
1515 1518 for dst, src in pycompat.iteritems(oldcopies)
1516 1519 }
1517 1520 # Adjust the dirstate copies
1518 1521 for dst, src in pycompat.iteritems(copies):
1519 1522 if src not in newctx or dst in newctx or ds[dst] != b'a':
1520 1523 src = None
1521 1524 ds.copy(src, dst)
1522 1525 repo._quick_access_changeid_invalidate()
1523 1526
1524 1527
1525 1528 def filterrequirements(requirements):
1526 1529 """ filters the requirements into two sets:
1527 1530
1528 1531 wcreq: requirements which should be written in .hg/requires
1529 1532 storereq: which should be written in .hg/store/requires
1530 1533
1531 1534 Returns (wcreq, storereq)
1532 1535 """
1533 1536 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
1534 1537 wc, store = set(), set()
1535 1538 for r in requirements:
1536 1539 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
1537 1540 wc.add(r)
1538 1541 else:
1539 1542 store.add(r)
1540 1543 return wc, store
1541 1544 return requirements, None
1542 1545
1543 1546
1544 1547 def istreemanifest(repo):
1545 1548 """ returns whether the repository is using treemanifest or not """
1546 1549 return requirementsmod.TREEMANIFEST_REQUIREMENT in repo.requirements
1547 1550
1548 1551
1549 1552 def writereporequirements(repo, requirements=None):
1550 1553 """ writes requirements for the repo to .hg/requires """
1551 1554 if requirements:
1552 1555 repo.requirements = requirements
1553 1556 wcreq, storereq = filterrequirements(repo.requirements)
1554 1557 if wcreq is not None:
1555 1558 writerequires(repo.vfs, wcreq)
1556 1559 if storereq is not None:
1557 1560 writerequires(repo.svfs, storereq)
1558 1561
1559 1562
1560 1563 def writerequires(opener, requirements):
1561 1564 with opener(b'requires', b'w', atomictemp=True) as fp:
1562 1565 for r in sorted(requirements):
1563 1566 fp.write(b"%s\n" % r)
1564 1567
1565 1568
1566 1569 class filecachesubentry(object):
1567 1570 def __init__(self, path, stat):
1568 1571 self.path = path
1569 1572 self.cachestat = None
1570 1573 self._cacheable = None
1571 1574
1572 1575 if stat:
1573 1576 self.cachestat = filecachesubentry.stat(self.path)
1574 1577
1575 1578 if self.cachestat:
1576 1579 self._cacheable = self.cachestat.cacheable()
1577 1580 else:
1578 1581 # None means we don't know yet
1579 1582 self._cacheable = None
1580 1583
1581 1584 def refresh(self):
1582 1585 if self.cacheable():
1583 1586 self.cachestat = filecachesubentry.stat(self.path)
1584 1587
1585 1588 def cacheable(self):
1586 1589 if self._cacheable is not None:
1587 1590 return self._cacheable
1588 1591
1589 1592 # we don't know yet, assume it is for now
1590 1593 return True
1591 1594
1592 1595 def changed(self):
1593 1596 # no point in going further if we can't cache it
1594 1597 if not self.cacheable():
1595 1598 return True
1596 1599
1597 1600 newstat = filecachesubentry.stat(self.path)
1598 1601
1599 1602 # we may not know if it's cacheable yet, check again now
1600 1603 if newstat and self._cacheable is None:
1601 1604 self._cacheable = newstat.cacheable()
1602 1605
1603 1606 # check again
1604 1607 if not self._cacheable:
1605 1608 return True
1606 1609
1607 1610 if self.cachestat != newstat:
1608 1611 self.cachestat = newstat
1609 1612 return True
1610 1613 else:
1611 1614 return False
1612 1615
1613 1616 @staticmethod
1614 1617 def stat(path):
1615 1618 try:
1616 1619 return util.cachestat(path)
1617 1620 except OSError as e:
1618 1621 if e.errno != errno.ENOENT:
1619 1622 raise
1620 1623
1621 1624
1622 1625 class filecacheentry(object):
1623 1626 def __init__(self, paths, stat=True):
1624 1627 self._entries = []
1625 1628 for path in paths:
1626 1629 self._entries.append(filecachesubentry(path, stat))
1627 1630
1628 1631 def changed(self):
1629 1632 '''true if any entry has changed'''
1630 1633 for entry in self._entries:
1631 1634 if entry.changed():
1632 1635 return True
1633 1636 return False
1634 1637
1635 1638 def refresh(self):
1636 1639 for entry in self._entries:
1637 1640 entry.refresh()
1638 1641
1639 1642
1640 1643 class filecache(object):
1641 1644 """A property like decorator that tracks files under .hg/ for updates.
1642 1645
1643 1646 On first access, the files defined as arguments are stat()ed and the
1644 1647 results cached. The decorated function is called. The results are stashed
1645 1648 away in a ``_filecache`` dict on the object whose method is decorated.
1646 1649
1647 1650 On subsequent access, the cached result is used as it is set to the
1648 1651 instance dictionary.
1649 1652
1650 1653 On external property set/delete operations, the caller must update the
1651 1654 corresponding _filecache entry appropriately. Use __class__.<attr>.set()
1652 1655 instead of directly setting <attr>.
1653 1656
1654 1657 When using the property API, the cached data is always used if available.
1655 1658 No stat() is performed to check if the file has changed.
1656 1659
1657 1660 Others can muck about with the state of the ``_filecache`` dict. e.g. they
1658 1661 can populate an entry before the property's getter is called. In this case,
1659 1662 entries in ``_filecache`` will be used during property operations,
1660 1663 if available. If the underlying file changes, it is up to external callers
1661 1664 to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
1662 1665 method result as well as possibly calling ``del obj._filecache[attr]`` to
1663 1666 remove the ``filecacheentry``.
1664 1667 """
1665 1668
1666 1669 def __init__(self, *paths):
1667 1670 self.paths = paths
1668 1671
1669 1672 def join(self, obj, fname):
1670 1673 """Used to compute the runtime path of a cached file.
1671 1674
1672 1675 Users should subclass filecache and provide their own version of this
1673 1676 function to call the appropriate join function on 'obj' (an instance
1674 1677 of the class that its member function was decorated).
1675 1678 """
1676 1679 raise NotImplementedError
1677 1680
1678 1681 def __call__(self, func):
1679 1682 self.func = func
1680 1683 self.sname = func.__name__
1681 1684 self.name = pycompat.sysbytes(self.sname)
1682 1685 return self
1683 1686
1684 1687 def __get__(self, obj, type=None):
1685 1688 # if accessed on the class, return the descriptor itself.
1686 1689 if obj is None:
1687 1690 return self
1688 1691
1689 1692 assert self.sname not in obj.__dict__
1690 1693
1691 1694 entry = obj._filecache.get(self.name)
1692 1695
1693 1696 if entry:
1694 1697 if entry.changed():
1695 1698 entry.obj = self.func(obj)
1696 1699 else:
1697 1700 paths = [self.join(obj, path) for path in self.paths]
1698 1701
1699 1702 # We stat -before- creating the object so our cache doesn't lie if
1700 1703 # a writer modifies it between the time we read and stat
1701 1704 entry = filecacheentry(paths, True)
1702 1705 entry.obj = self.func(obj)
1703 1706
1704 1707 obj._filecache[self.name] = entry
1705 1708
1706 1709 obj.__dict__[self.sname] = entry.obj
1707 1710 return entry.obj
1708 1711
1709 1712 # don't implement __set__(), which would make __dict__ lookup as slow as
1710 1713 # function call.
1711 1714
1712 1715 def set(self, obj, value):
1713 1716 if self.name not in obj._filecache:
1714 1717 # we add an entry for the missing value because X in __dict__
1715 1718 # implies X in _filecache
1716 1719 paths = [self.join(obj, path) for path in self.paths]
1717 1720 ce = filecacheentry(paths, False)
1718 1721 obj._filecache[self.name] = ce
1719 1722 else:
1720 1723 ce = obj._filecache[self.name]
1721 1724
1722 1725 ce.obj = value # update cached copy
1723 1726 obj.__dict__[self.sname] = value # update copy returned by obj.x
1724 1727
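# A minimal sketch of how filecache is meant to be specialized: a subclass
# supplies join() so paths resolve against the decorated object's vfs. The
# vfs-based join below mirrors what localrepo does, but treat the exact
# shape as an assumption, not the canonical implementation.
class _sketchfilecache(filecache):
    def join(self, obj, fname):
        return obj.vfs.join(fname)

# @_sketchfilecache(b'bookmarks')
# def _bookmarks(repo):
#     ...  # recomputed only when .hg/bookmarks changes on disk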
1725 1728
1726 1729 def extdatasource(repo, source):
1727 1730 """Gather a map of rev -> value dict from the specified source
1728 1731
1729 1732 A source spec is treated as a URL, with a special case shell: type
1730 1733 for parsing the output from a shell command.
1731 1734
1732 1735 The data is parsed as a series of newline-separated records where
1733 1736 each record is a revision specifier optionally followed by a space
1734 1737 and a freeform string value. If the revision is known locally, it
1735 1738 is converted to a rev, otherwise the record is skipped.
1736 1739
1737 1740 Note that both key and value are treated as UTF-8 and converted to
1738 1741 the local encoding. This allows uniformity between local and
1739 1742 remote data sources.
1740 1743 """
1741 1744
1742 1745 spec = repo.ui.config(b"extdata", source)
1743 1746 if not spec:
1744 1747 raise error.Abort(_(b"unknown extdata source '%s'") % source)
1745 1748
1746 1749 data = {}
1747 1750 src = proc = None
1748 1751 try:
1749 1752 if spec.startswith(b"shell:"):
1750 1753 # external commands should be run relative to the repo root
1751 1754 cmd = spec[6:]
1752 1755 proc = subprocess.Popen(
1753 1756 procutil.tonativestr(cmd),
1754 1757 shell=True,
1755 1758 bufsize=-1,
1756 1759 close_fds=procutil.closefds,
1757 1760 stdout=subprocess.PIPE,
1758 1761 cwd=procutil.tonativestr(repo.root),
1759 1762 )
1760 1763 src = proc.stdout
1761 1764 else:
1762 1765 # treat as a URL or file
1763 1766 src = url.open(repo.ui, spec)
1764 1767 for l in src:
1765 1768 if b" " in l:
1766 1769 k, v = l.strip().split(b" ", 1)
1767 1770 else:
1768 1771 k, v = l.strip(), b""
1769 1772
1770 1773 k = encoding.tolocal(k)
1771 1774 try:
1772 1775 data[revsingle(repo, k).rev()] = encoding.tolocal(v)
1773 1776 except (error.LookupError, error.RepoLookupError):
1774 1777 pass # we ignore data for nodes that don't exist locally
1775 1778 finally:
1776 1779 if proc:
1777 1780 try:
1778 1781 proc.communicate()
1779 1782 except ValueError:
1780 1783 # This happens if we started iterating src and then
1781 1784 # get a parse error on a line. It should be safe to ignore.
1782 1785 pass
1783 1786 if src:
1784 1787 src.close()
1785 1788 if proc and proc.returncode != 0:
1786 1789 raise error.Abort(
1787 1790 _(b"extdata command '%s' failed: %s")
1788 1791 % (cmd, procutil.explainexit(proc.returncode))
1789 1792 )
1790 1793
1791 1794 return data
1792 1795
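# Illustrative configuration consumed by extdatasource (hgrc syntax); the
# source name 'bugrefs' and the data file are made up for this example:
#
#   [extdata]
#   bugrefs = shell:cat bugs.txt
#
# With bugs.txt lines such as "<node-or-revspec> issue1234", the helper
# returns a dict mapping each locally-known revision to its value string;
# unknown revisions are silently skipped, as documented above.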
1793 1796
1794 1797 class progress(object):
1795 1798 def __init__(self, ui, updatebar, topic, unit=b"", total=None):
1796 1799 self.ui = ui
1797 1800 self.pos = 0
1798 1801 self.topic = topic
1799 1802 self.unit = unit
1800 1803 self.total = total
1801 1804 self.debug = ui.configbool(b'progress', b'debug')
1802 1805 self._updatebar = updatebar
1803 1806
1804 1807 def __enter__(self):
1805 1808 return self
1806 1809
1807 1810 def __exit__(self, exc_type, exc_value, exc_tb):
1808 1811 self.complete()
1809 1812
1810 1813 def update(self, pos, item=b"", total=None):
1811 1814 assert pos is not None
1812 1815 if total:
1813 1816 self.total = total
1814 1817 self.pos = pos
1815 1818 self._updatebar(self.topic, self.pos, item, self.unit, self.total)
1816 1819 if self.debug:
1817 1820 self._printdebug(item)
1818 1821
1819 1822 def increment(self, step=1, item=b"", total=None):
1820 1823 self.update(self.pos + step, item, total)
1821 1824
1822 1825 def complete(self):
1823 1826 self.pos = None
1824 1827 self.unit = b""
1825 1828 self.total = None
1826 1829 self._updatebar(self.topic, self.pos, b"", self.unit, self.total)
1827 1830
1828 1831 def _printdebug(self, item):
1829 1832 unit = b''
1830 1833 if self.unit:
1831 1834 unit = b' ' + self.unit
1832 1835 if item:
1833 1836 item = b' ' + item
1834 1837
1835 1838 if self.total:
1836 1839 pct = 100.0 * self.pos / self.total
1837 1840 self.ui.debug(
1838 1841 b'%s:%s %d/%d%s (%4.2f%%)\n'
1839 1842 % (self.topic, item, self.pos, self.total, unit, pct)
1840 1843 )
1841 1844 else:
1842 1845 self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit))
1843 1846
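# Hedged usage sketch: progress is a context manager, so complete() runs on
# exit. ui.makeprogress is the usual way such an object is obtained in
# current Mercurial, but treat that entry point as an assumption here.
#
#   with ui.makeprogress(b'scanning', unit=b'files', total=n) as p:
#       for i, f in enumerate(files):
#           p.update(i + 1, item=f)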
1844 1847
1845 1848 def gdinitconfig(ui):
1846 1849 """helper function to know if a repo should be created as general delta
1847 1850 """
1848 1851 # experimental config: format.generaldelta
1849 1852 return ui.configbool(b'format', b'generaldelta') or ui.configbool(
1850 1853 b'format', b'usegeneraldelta'
1851 1854 )
1852 1855
1853 1856
1854 1857 def gddeltaconfig(ui):
1855 1858 """helper function to know if incoming delta should be optimised
1856 1859 """
1857 1860 # experimental config: format.generaldelta
1858 1861 return ui.configbool(b'format', b'generaldelta')
1859 1862
1860 1863
1861 1864 class simplekeyvaluefile(object):
1862 1865 """A simple file with key=value lines
1863 1866
1864 1867 Keys must be alphanumerics and start with a letter, values must not
1865 1868 contain '\n' characters"""
1866 1869
1867 1870 firstlinekey = b'__firstline'
1868 1871
1869 1872 def __init__(self, vfs, path, keys=None):
1870 1873 self.vfs = vfs
1871 1874 self.path = path
1872 1875
1873 1876 def read(self, firstlinenonkeyval=False):
1874 1877 """Read the contents of a simple key-value file
1875 1878
1876 1879 'firstlinenonkeyval' indicates whether the first line of the file should
1877 1880 be treated as a key-value pair or returned fully under the
1878 1881 __firstline key."""
1879 1882 lines = self.vfs.readlines(self.path)
1880 1883 d = {}
1881 1884 if firstlinenonkeyval:
1882 1885 if not lines:
1883 1886 e = _(b"empty simplekeyvalue file")
1884 1887 raise error.CorruptedState(e)
1885 1888 # we don't want to include '\n' in the __firstline
1886 1889 d[self.firstlinekey] = lines[0][:-1]
1887 1890 del lines[0]
1888 1891
1889 1892 try:
1890 1893 # the 'if line.strip()' part prevents us from failing on empty
1891 1894 # lines which only contain '\n' and therefore are not skipped
1892 1895 # by 'if line'
1893 1896 updatedict = dict(
1894 1897 line[:-1].split(b'=', 1) for line in lines if line.strip()
1895 1898 )
1896 1899 if self.firstlinekey in updatedict:
1897 1900 e = _(b"%r can't be used as a key")
1898 1901 raise error.CorruptedState(e % self.firstlinekey)
1899 1902 d.update(updatedict)
1900 1903 except ValueError as e:
1901 1904 raise error.CorruptedState(stringutil.forcebytestr(e))
1902 1905 return d
1903 1906
1904 1907 def write(self, data, firstline=None):
1905 1908 """Write key=>value mapping to a file
1906 1909 data is a dict. Keys must be alphanumerical and start with a letter.
1907 1910 Values must not contain newline characters.
1908 1911
1909 1912 If 'firstline' is not None, it is written to file before
1910 1913 everything else, as it is, not in a key=value form"""
1911 1914 lines = []
1912 1915 if firstline is not None:
1913 1916 lines.append(b'%s\n' % firstline)
1914 1917
1915 1918 for k, v in data.items():
1916 1919 if k == self.firstlinekey:
1917 1920 e = b"key name '%s' is reserved" % self.firstlinekey
1918 1921 raise error.ProgrammingError(e)
1919 1922 if not k[0:1].isalpha():
1920 1923 e = b"keys must start with a letter in a key-value file"
1921 1924 raise error.ProgrammingError(e)
1922 1925 if not k.isalnum():
1923 1926 e = b"invalid key name in a simple key-value file"
1924 1927 raise error.ProgrammingError(e)
1925 1928 if b'\n' in v:
1926 1929 e = b"invalid value in a simple key-value file"
1927 1930 raise error.ProgrammingError(e)
1928 1931 lines.append(b"%s=%s\n" % (k, v))
1929 1932 with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp:
1930 1933 fp.write(b''.join(lines))
1931 1934
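# Hedged usage sketch for the class above; the file name b'state' and the
# first line b'fmt0' are made up, and 'repo.vfs' stands in for any vfs:
#
#   kv = simplekeyvaluefile(repo.vfs, b'state')
#   kv.write({b'version': b'1'}, firstline=b'fmt0')
#   kv.read(firstlinenonkeyval=True)
#   -> {b'__firstline': b'fmt0', b'version': b'1'}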
1932 1935
1933 1936 _reportobsoletedsource = [
1934 1937 b'debugobsolete',
1935 1938 b'pull',
1936 1939 b'push',
1937 1940 b'serve',
1938 1941 b'unbundle',
1939 1942 ]
1940 1943
1941 1944 _reportnewcssource = [
1942 1945 b'pull',
1943 1946 b'unbundle',
1944 1947 ]
1945 1948
1946 1949
1947 1950 def prefetchfiles(repo, revmatches):
1948 1951 """Invokes the registered file prefetch functions, allowing extensions to
1949 1952 ensure the corresponding files are available locally, before the command
1950 1953 uses them.
1951 1954
1952 1955 Args:
1953 1956 revmatches: a list of (revision, match) tuples to indicate the files to
1954 1957 fetch at each revision. If any of the match elements is None, it matches
1955 1958 all files.
1956 1959 """
1957 1960
1958 1961 def _matcher(m):
1959 1962 if m:
1960 1963 assert isinstance(m, matchmod.basematcher)
1961 1964 # The command itself will complain about files that don't exist, so
1962 1965 # don't duplicate the message.
1963 1966 return matchmod.badmatch(m, lambda fn, msg: None)
1964 1967 else:
1965 1968 return matchall(repo)
1966 1969
1967 1970 revbadmatches = [(rev, _matcher(match)) for (rev, match) in revmatches]
1968 1971
1969 1972 fileprefetchhooks(repo, revbadmatches)
1970 1973
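# Hedged usage sketch: ask any registered hooks (e.g. from an extension such
# as lfs) to make file contents available before reading them; a None match
# means all files at that revision, as documented above.
#
#   prefetchfiles(repo, [(rev1, matchfiles(repo, [b'a.txt'])), (rev2, None)])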
1971 1974
1972 1975 # a list of (repo, revs, match) prefetch functions
1973 1976 fileprefetchhooks = util.hooks()
1974 1977
1975 1978 # A marker that tells the evolve extension to suppress its own reporting
1976 1979 _reportstroubledchangesets = True
1977 1980
1978 1981
1979 1982 def registersummarycallback(repo, otr, txnname=b'', as_validator=False):
1980 1983 """register a callback to issue a summary after the transaction is closed
1981 1984
1982 1985 If as_validator is true, then the callbacks are registered as transaction
1983 1986 validators instead
1984 1987 """
1985 1988
1986 1989 def txmatch(sources):
1987 1990 return any(txnname.startswith(source) for source in sources)
1988 1991
1989 1992 categories = []
1990 1993
1991 1994 def reportsummary(func):
1992 1995 """decorator for report callbacks."""
1993 1996 # The repoview life cycle is shorter than the one of the actual
1994 1997 # underlying repository. So the filtered object can die before the
1995 1998 # weakref is used, leading to trouble. We keep a reference to the
1996 1999 # unfiltered object and restore the filtering when retrieving the
1997 2000 # repository through the weakref.
1998 2001 filtername = repo.filtername
1999 2002 reporef = weakref.ref(repo.unfiltered())
2000 2003
2001 2004 def wrapped(tr):
2002 2005 repo = reporef()
2003 2006 if filtername:
2004 2007 assert repo is not None # help pytype
2005 2008 repo = repo.filtered(filtername)
2006 2009 func(repo, tr)
2007 2010
2008 2011 newcat = b'%02i-txnreport' % len(categories)
2009 2012 if as_validator:
2010 2013 otr.addvalidator(newcat, wrapped)
2011 2014 else:
2012 2015 otr.addpostclose(newcat, wrapped)
2013 2016 categories.append(newcat)
2014 2017 return wrapped
2015 2018
2016 2019 @reportsummary
2017 2020 def reportchangegroup(repo, tr):
2018 2021 cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
2019 2022 cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
2020 2023 cgfiles = tr.changes.get(b'changegroup-count-files', 0)
2021 2024 cgheads = tr.changes.get(b'changegroup-count-heads', 0)
2022 2025 if cgchangesets or cgrevisions or cgfiles:
2023 2026 htext = b""
2024 2027 if cgheads:
2025 2028 htext = _(b" (%+d heads)") % cgheads
2026 2029 msg = _(b"added %d changesets with %d changes to %d files%s\n")
2027 2030 if as_validator:
2028 2031 msg = _(b"adding %d changesets with %d changes to %d files%s\n")
2029 2032 assert repo is not None # help pytype
2030 2033 repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))
2031 2034
2032 2035 if txmatch(_reportobsoletedsource):
2033 2036
2034 2037 @reportsummary
2035 2038 def reportobsoleted(repo, tr):
2036 2039 obsoleted = obsutil.getobsoleted(repo, tr)
2037 2040 newmarkers = len(tr.changes.get(b'obsmarkers', ()))
2038 2041 if newmarkers:
2039 2042 repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
2040 2043 if obsoleted:
2041 2044 msg = _(b'obsoleted %i changesets\n')
2042 2045 if as_validator:
2043 2046 msg = _(b'obsoleting %i changesets\n')
2044 2047 repo.ui.status(msg % len(obsoleted))
2045 2048
2046 2049 if obsolete.isenabled(
2047 2050 repo, obsolete.createmarkersopt
2048 2051 ) and repo.ui.configbool(
2049 2052 b'experimental', b'evolution.report-instabilities'
2050 2053 ):
2051 2054 instabilitytypes = [
2052 2055 (b'orphan', b'orphan'),
2053 2056 (b'phase-divergent', b'phasedivergent'),
2054 2057 (b'content-divergent', b'contentdivergent'),
2055 2058 ]
2056 2059
2057 2060 def getinstabilitycounts(repo):
2058 2061 filtered = repo.changelog.filteredrevs
2059 2062 counts = {}
2060 2063 for instability, revset in instabilitytypes:
2061 2064 counts[instability] = len(
2062 2065 set(obsolete.getrevs(repo, revset)) - filtered
2063 2066 )
2064 2067 return counts
2065 2068
2066 2069 oldinstabilitycounts = getinstabilitycounts(repo)
2067 2070
2068 2071 @reportsummary
2069 2072 def reportnewinstabilities(repo, tr):
2070 2073 newinstabilitycounts = getinstabilitycounts(repo)
2071 2074 for instability, revset in instabilitytypes:
2072 2075 delta = (
2073 2076 newinstabilitycounts[instability]
2074 2077 - oldinstabilitycounts[instability]
2075 2078 )
2076 2079 msg = getinstabilitymessage(delta, instability)
2077 2080 if msg:
2078 2081 repo.ui.warn(msg)
2079 2082
2080 2083 if txmatch(_reportnewcssource):
2081 2084
2082 2085 @reportsummary
2083 2086 def reportnewcs(repo, tr):
2084 2087 """Report the range of new revisions pulled/unbundled."""
2085 2088 origrepolen = tr.changes.get(b'origrepolen', len(repo))
2086 2089 unfi = repo.unfiltered()
2087 2090 if origrepolen >= len(unfi):
2088 2091 return
2089 2092
2090 2093 # Compute the bounds of new visible revisions' range.
2091 2094 revs = smartset.spanset(repo, start=origrepolen)
2092 2095 if revs:
2093 2096 minrev, maxrev = repo[revs.min()], repo[revs.max()]
2094 2097
2095 2098 if minrev == maxrev:
2096 2099 revrange = minrev
2097 2100 else:
2098 2101 revrange = b'%s:%s' % (minrev, maxrev)
2099 2102 draft = len(repo.revs(b'%ld and draft()', revs))
2100 2103 secret = len(repo.revs(b'%ld and secret()', revs))
2101 2104 if not (draft or secret):
2102 2105 msg = _(b'new changesets %s\n') % revrange
2103 2106 elif draft and secret:
2104 2107 msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
2105 2108 msg %= (revrange, draft, secret)
2106 2109 elif draft:
2107 2110 msg = _(b'new changesets %s (%d drafts)\n')
2108 2111 msg %= (revrange, draft)
2109 2112 elif secret:
2110 2113 msg = _(b'new changesets %s (%d secrets)\n')
2111 2114 msg %= (revrange, secret)
2112 2115 else:
2113 2116 errormsg = b'entered unreachable condition'
2114 2117 raise error.ProgrammingError(errormsg)
2115 2118 repo.ui.status(msg)
2116 2119
2117 2120 # search new changesets directly pulled as obsolete
2118 2121 duplicates = tr.changes.get(b'revduplicates', ())
2119 2122 obsadded = unfi.revs(
2120 2123 b'(%d: + %ld) and obsolete()', origrepolen, duplicates
2121 2124 )
2122 2125 cl = repo.changelog
2123 2126 extinctadded = [r for r in obsadded if r not in cl]
2124 2127 if extinctadded:
2125 2128 # They are not just obsolete, but obsolete and invisible;
2126 2129 # we call them "extinct" internally but the term has not been
2127 2130 # exposed to users.
2128 2131 msg = b'(%d other changesets obsolete on arrival)\n'
2129 2132 repo.ui.status(msg % len(extinctadded))
2130 2133
2131 2134 @reportsummary
2132 2135 def reportphasechanges(repo, tr):
2133 2136 """Report statistics of phase changes for changesets pre-existing
2134 2137 pull/unbundle.
2135 2138 """
2136 2139 origrepolen = tr.changes.get(b'origrepolen', len(repo))
2137 2140 published = []
2138 2141 for revs, (old, new) in tr.changes.get(b'phases', []):
2139 2142 if new != phases.public:
2140 2143 continue
2141 2144 published.extend(rev for rev in revs if rev < origrepolen)
2142 2145 if not published:
2143 2146 return
2144 2147 msg = _(b'%d local changesets published\n')
2145 2148 if as_validator:
2146 2149 msg = _(b'%d local changesets will be published\n')
2147 2150 repo.ui.status(msg % len(published))
2148 2151
2149 2152
2150 2153 def getinstabilitymessage(delta, instability):
2151 2154 """function to return the message to show warning about new instabilities
2152 2155
2153 2156 exists as a separate function so that extension can wrap to show more
2154 2157 information like how to fix instabilities"""
2155 2158 if delta > 0:
2156 2159 return _(b'%i new %s changesets\n') % (delta, instability)
2157 2160
2158 2161
2159 2162 def nodesummaries(repo, nodes, maxnumnodes=4):
2160 2163 if len(nodes) <= maxnumnodes or repo.ui.verbose:
2161 2164 return b' '.join(short(h) for h in nodes)
2162 2165 first = b' '.join(short(h) for h in nodes[:maxnumnodes])
2163 2166 return _(b"%s and %d others") % (first, len(nodes) - maxnumnodes)
2164 2167
2165 2168
2166 2169 def enforcesinglehead(repo, tr, desc, accountclosed=False):
2167 2170 """check that no named branch has multiple heads"""
2168 2171 if desc in (b'strip', b'repair'):
2169 2172 # skip the logic during strip
2170 2173 return
2171 2174 visible = repo.filtered(b'visible')
2172 2175 # possible improvement: we could restrict the check to affected branch
2173 2176 bm = visible.branchmap()
2174 2177 for name in bm:
2175 2178 heads = bm.branchheads(name, closed=accountclosed)
2176 2179 if len(heads) > 1:
2177 2180 msg = _(b'rejecting multiple heads on branch "%s"')
2178 2181 msg %= name
2179 2182 hint = _(b'%d heads: %s')
2180 2183 hint %= (len(heads), nodesummaries(repo, heads))
2181 2184 raise error.Abort(msg, hint=hint)
2182 2185
2183 2186
2184 2187 def wrapconvertsink(sink):
2185 2188 """Allow extensions to wrap the sink returned by convcmd.convertsink()
2186 2189 before it is used, whether or not the convert extension was formally loaded.
2187 2190 """
2188 2191 return sink
2189 2192
2190 2193
2191 2194 def unhidehashlikerevs(repo, specs, hiddentype):
2192 2195 """parse the user specs and unhide changesets whose hash or revision number
2193 2196 is passed.
2194 2197
2195 2198 hiddentype can be: 1) 'warn': warn while unhiding changesets
2196 2199 2) 'nowarn': don't warn while unhiding changesets
2197 2200
2198 2201 returns a repo object with the required changesets unhidden
2199 2202 """
2200 2203 if not repo.filtername or not repo.ui.configbool(
2201 2204 b'experimental', b'directaccess'
2202 2205 ):
2203 2206 return repo
2204 2207
2205 2208 if repo.filtername not in (b'visible', b'visible-hidden'):
2206 2209 return repo
2207 2210
2208 2211 symbols = set()
2209 2212 for spec in specs:
2210 2213 try:
2211 2214 tree = revsetlang.parse(spec)
2212 2215 except error.ParseError: # will be reported by scmutil.revrange()
2213 2216 continue
2214 2217
2215 2218 symbols.update(revsetlang.gethashlikesymbols(tree))
2216 2219
2217 2220 if not symbols:
2218 2221 return repo
2219 2222
2220 2223 revs = _getrevsfromsymbols(repo, symbols)
2221 2224
2222 2225 if not revs:
2223 2226 return repo
2224 2227
2225 2228 if hiddentype == b'warn':
2226 2229 unfi = repo.unfiltered()
2227 2230 revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in revs])
2228 2231 repo.ui.warn(
2229 2232 _(
2230 2233 b"warning: accessing hidden changesets for write "
2231 2234 b"operation: %s\n"
2232 2235 )
2233 2236 % revstr
2234 2237 )
2235 2238
2236 2239 # we have to use a new filtername to separate branch/tags caches until we
2237 2240 # can disable these caches when revisions are dynamically pinned.
2238 2241 return repo.filtered(b'visible-hidden', revs)
2239 2242
2240 2243
2241 2244 def _getrevsfromsymbols(repo, symbols):
2242 2245 """parse the list of symbols and returns a set of revision numbers of hidden
2243 2246 changesets present in symbols"""
2244 2247 revs = set()
2245 2248 unfi = repo.unfiltered()
2246 2249 unficl = unfi.changelog
2247 2250 cl = repo.changelog
2248 2251 tiprev = len(unficl)
2249 2252 allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
2250 2253 for s in symbols:
2251 2254 try:
2252 2255 n = int(s)
2253 2256 if n <= tiprev:
2254 2257 if not allowrevnums:
2255 2258 continue
2256 2259 else:
2257 2260 if n not in cl:
2258 2261 revs.add(n)
2259 2262 continue
2260 2263 except ValueError:
2261 2264 pass
2262 2265
2263 2266 try:
2264 2267 s = resolvehexnodeidprefix(unfi, s)
2265 2268 except (error.LookupError, error.WdirUnsupported):
2266 2269 s = None
2267 2270
2268 2271 if s is not None:
2269 2272 rev = unficl.rev(s)
2270 2273 if rev not in cl:
2271 2274 revs.add(rev)
2272 2275
2273 2276 return revs
2274 2277
2275 2278
2276 2279 def bookmarkrevs(repo, mark):
2277 2280 """
2278 2281 Select revisions reachable by a given bookmark
2279 2282 """
2280 2283 return repo.revs(
2281 2284 b"ancestors(bookmark(%s)) - "
2282 2285 b"ancestors(head() and not bookmark(%s)) - "
2283 2286 b"ancestors(bookmark() and not bookmark(%s))",
2284 2287 mark,
2285 2288 mark,
2286 2289 mark,
2287 2290 )
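# Reading of the revset above (an explanatory note, not new code): start
# from the ancestors of the bookmark, then drop ancestors of heads and of
# other bookmarks that don't carry it, leaving only the revisions "owned"
# by this bookmark.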
@@ -1,455 +1,455 b''
1 1 # worker.py - master-slave parallelism support
2 2 #
3 3 # Copyright 2013 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import signal
13 13 import sys
14 14 import threading
15 15 import time
16 16
17 17 try:
18 18 import selectors
19 19
20 20 selectors.BaseSelector
21 21 except ImportError:
22 22 from .thirdparty import selectors2 as selectors
23 23
24 24 from .i18n import _
25 25 from . import (
26 26 encoding,
27 27 error,
28 28 pycompat,
29 29 scmutil,
30 30 util,
31 31 )
32 32
33 33
34 34 def countcpus():
35 35 '''try to count the number of CPUs on the system'''
36 36
37 37 # posix
38 38 try:
39 39 n = int(os.sysconf('SC_NPROCESSORS_ONLN'))
40 40 if n > 0:
41 41 return n
42 42 except (AttributeError, ValueError):
43 43 pass
44 44
45 45 # windows
46 46 try:
47 47 n = int(encoding.environ[b'NUMBER_OF_PROCESSORS'])
48 48 if n > 0:
49 49 return n
50 50 except (KeyError, ValueError):
51 51 pass
52 52
53 53 return 1
54 54
55 55
56 56 def _numworkers(ui):
57 57 s = ui.config(b'worker', b'numcpus')
58 58 if s:
59 59 try:
60 60 n = int(s)
61 61 if n >= 1:
62 62 return n
63 63 except ValueError:
64 64 raise error.Abort(_(b'number of cpus must be an integer'))
65 65 return min(max(countcpus(), 4), 32)
66 66
67 67
68 68 if pycompat.ispy3:
69 69
70 70 class _blockingreader(object):
71 71 def __init__(self, wrapped):
72 72 self._wrapped = wrapped
73 73
74 74 # Do NOT implement readinto() by making it delegate to
75 75 # _wrapped.readinto(), since that is unbuffered. The unpickler is fine
76 76 # with just read() and readline(), so we don't need to implement it.
77 77
78 78 def readline(self):
79 79 return self._wrapped.readline()
80 80
81 81 # issue multiple reads until size is fulfilled
82 82 def read(self, size=-1):
83 83 if size < 0:
84 84 return self._wrapped.readall()
85 85
86 86 buf = bytearray(size)
87 87 view = memoryview(buf)
88 88 pos = 0
89 89
90 90 while pos < size:
91 91 ret = self._wrapped.readinto(view[pos:])
92 92 if not ret:
93 93 break
94 94 pos += ret
95 95
96 96 del view
97 97 del buf[pos:]
98 98 return bytes(buf)
99 99
100 100
101 101 else:
102 102
103 103 def _blockingreader(wrapped):
104 104 return wrapped
105 105
106 106
107 107 if pycompat.isposix or pycompat.iswindows:
108 108 _STARTUP_COST = 0.01
109 109 # The Windows worker is thread based. If tasks are CPU bound, threads
110 110 # in the presence of the GIL result in excessive context switching and
111 111 # this overhead can slow down execution.
112 112 _DISALLOW_THREAD_UNSAFE = pycompat.iswindows
113 113 else:
114 114 _STARTUP_COST = 1e30
115 115 _DISALLOW_THREAD_UNSAFE = False
116 116
117 117
118 118 def worthwhile(ui, costperop, nops, threadsafe=True):
119 119 '''try to determine whether the benefit of multiple processes can
120 120 outweigh the cost of starting them'''
121 121
122 122 if not threadsafe and _DISALLOW_THREAD_UNSAFE:
123 123 return False
124 124
125 125 linear = costperop * nops
126 126 workers = _numworkers(ui)
127 127 benefit = linear - (_STARTUP_COST * workers + linear / workers)
128 128 return benefit >= 0.15
129 129
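# A minimal sketch (not Mercurial code) of the heuristic above, with
# illustrative numbers under the posix _STARTUP_COST of 0.01: for 4 workers
# and 30000 ops at 0.0001s each, linear = 3.0s, the parallel estimate is
# 0.01 * 4 + 3.0 / 4 = 0.79s, so benefit = 2.21s >= 0.15s and workers win.
def _sketch_worthwhile(costperop, nops, workers, startup_cost=0.01):
    linear = costperop * nops
    parallel = startup_cost * workers + linear / workers
    return (linear - parallel) >= 0.15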
130 130
131 131 def worker(
132 132 ui, costperarg, func, staticargs, args, hasretval=False, threadsafe=True
133 133 ):
134 134 '''run a function, possibly in parallel in multiple worker
135 135 processes.
136 136
137 137 returns a progress iterator
138 138
139 139 costperarg - cost of a single task
140 140
141 141 func - function to run. It is expected to return a progress iterator.
142 142
143 143 staticargs - arguments to pass to every invocation of the function
144 144
145 145 args - arguments to split into chunks, to pass to individual
146 146 workers
147 147
148 148 hasretval - when True, func and the current function return a progress
149 149 iterator then a dict (encoded as an iterator that yields many (False, ..)
150 150 then a (True, dict)). The dicts are joined in some arbitrary order, so
151 151 overlapping keys are a bad idea.
152 152
153 153 threadsafe - whether work items are thread safe and can be executed using
154 154 a thread-based worker. Should be disabled for CPU heavy tasks that don't
155 155 release the GIL.
156 156 '''
157 157 enabled = ui.configbool(b'worker', b'enabled')
158 158 if enabled and worthwhile(ui, costperarg, len(args), threadsafe=threadsafe):
159 159 return _platformworker(ui, func, staticargs, args, hasretval)
160 160 return func(*staticargs + (args,))
161 161
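# A hypothetical func obeying the hasretval protocol described above: yield
# (False, progress) items, then exactly one (True, dict). The names below
# (hashfiles, digests) are illustrative, not Mercurial APIs.
def hashfiles(ui, repo, files):
    import hashlib
    digests = {}
    for i, f in enumerate(files):
        digests[f] = hashlib.sha1(f).hexdigest()  # stand-in for real work
        yield False, (i + 1, f)                   # progress item
    yield True, digests                           # final (True, dict)

# for done, item in worker(ui, 0.001, hashfiles, (ui, repo), files,
#                          hasretval=True):
#     ...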
162 162
163 163 def _posixworker(ui, func, staticargs, args, hasretval):
164 164 workers = _numworkers(ui)
165 165 oldhandler = signal.getsignal(signal.SIGINT)
166 166 signal.signal(signal.SIGINT, signal.SIG_IGN)
167 167 pids, problem = set(), [0]
168 168
169 169 def killworkers():
170 170 # unregister SIGCHLD handler as all children will be killed. This
171 171 # function shouldn't be interrupted by another SIGCHLD; otherwise pids
172 172 # could be updated while iterating, which would cause inconsistency.
173 173 signal.signal(signal.SIGCHLD, oldchldhandler)
174 174 # if one worker bails, there's no good reason to wait for the rest
175 175 for p in pids:
176 176 try:
177 177 os.kill(p, signal.SIGTERM)
178 178 except OSError as err:
179 179 if err.errno != errno.ESRCH:
180 180 raise
181 181
182 182 def waitforworkers(blocking=True):
183 183 for pid in pids.copy():
184 184 p = st = 0
185 185 while True:
186 186 try:
187 187 p, st = os.waitpid(pid, (0 if blocking else os.WNOHANG))
188 188 break
189 189 except OSError as e:
190 190 if e.errno == errno.EINTR:
191 191 continue
192 192 elif e.errno == errno.ECHILD:
193 193 # child has already been reaped, but pids has not yet
194 194 # been updated (maybe interrupted just after waitpid)
195 195 pids.discard(pid)
196 196 break
197 197 else:
198 198 raise
199 199 if not p:
200 200 # skip subsequent steps, because child process should
201 201 # still be running in this case
202 202 continue
203 203 pids.discard(p)
204 204 st = _exitstatus(st)
205 205 if st and not problem[0]:
206 206 problem[0] = st
207 207
208 208 def sigchldhandler(signum, frame):
209 209 waitforworkers(blocking=False)
210 210 if problem[0]:
211 211 killworkers()
212 212
213 213 oldchldhandler = signal.signal(signal.SIGCHLD, sigchldhandler)
214 214 ui.flush()
215 215 parentpid = os.getpid()
216 216 pipes = []
217 217 retval = {}
218 218 for pargs in partition(args, min(workers, len(args))):
219 219 # Every worker gets its own pipe to send results on, so we don't have to
220 220 # implement atomic writes larger than PIPE_BUF. Each forked process has
221 221 # its own pipe's descriptors in the local variables, and the parent
222 222 # process has the full list of pipe descriptors (and it doesn't really
223 223 # care what order they're in).
224 224 rfd, wfd = os.pipe()
225 225 pipes.append((rfd, wfd))
226 226 # make sure we use os._exit in all worker code paths. otherwise the
227 227 # worker may do some clean-ups which could cause surprises like
228 228 # deadlock. see sshpeer.cleanup for example.
229 229 # override error handling *before* fork. this is necessary because
230 230 # exception (signal) may arrive after fork, before "pid =" assignment
231 231 # completes, and other exception handler (dispatch.py) can lead to
232 232 # unexpected code path without os._exit.
233 233 ret = -1
234 234 try:
235 235 pid = os.fork()
236 236 if pid == 0:
237 237 signal.signal(signal.SIGINT, oldhandler)
238 238 signal.signal(signal.SIGCHLD, oldchldhandler)
239 239
240 240 def workerfunc():
241 241 for r, w in pipes[:-1]:
242 242 os.close(r)
243 243 os.close(w)
244 244 os.close(rfd)
245 245 for result in func(*(staticargs + (pargs,))):
246 246 os.write(wfd, util.pickle.dumps(result))
247 247 return 0
248 248
249 249 ret = scmutil.callcatch(ui, workerfunc)
250 250 except: # parent re-raises, child never returns
251 251 if os.getpid() == parentpid:
252 252 raise
253 253 exctype = sys.exc_info()[0]
254 254 force = not issubclass(exctype, KeyboardInterrupt)
255 255 ui.traceback(force=force)
256 256 finally:
257 257 if os.getpid() != parentpid:
258 258 try:
259 259 ui.flush()
260 260 except: # never returns, no re-raises
261 261 pass
262 262 finally:
263 263 os._exit(ret & 255)
264 264 pids.add(pid)
265 265 selector = selectors.DefaultSelector()
266 266 for rfd, wfd in pipes:
267 267 os.close(wfd)
268 268 selector.register(os.fdopen(rfd, 'rb', 0), selectors.EVENT_READ)
269 269
270 270 def cleanup():
271 271 signal.signal(signal.SIGINT, oldhandler)
272 272 waitforworkers()
273 273 signal.signal(signal.SIGCHLD, oldchldhandler)
274 274 selector.close()
275 275 return problem[0]
276 276
277 277 try:
278 278 openpipes = len(pipes)
279 279 while openpipes > 0:
280 280 for key, events in selector.select():
281 281 try:
282 282 res = util.pickle.load(_blockingreader(key.fileobj))
283 283 if hasretval and res[0]:
284 284 retval.update(res[1])
285 285 else:
286 286 yield res
287 287 except EOFError:
288 288 selector.unregister(key.fileobj)
289 289 key.fileobj.close()
290 290 openpipes -= 1
291 291 except IOError as e:
292 292 if e.errno == errno.EINTR:
293 293 continue
294 294 raise
295 295 except: # re-raises
296 296 killworkers()
297 297 cleanup()
298 298 raise
299 299 status = cleanup()
300 300 if status:
301 301 if status < 0:
302 302 os.kill(os.getpid(), -status)
303 sys.exit(status)
303 raise error.WorkerError(status)
304 304 if hasretval:
305 305 yield True, retval
306 306
307 307
308 308 def _posixexitstatus(code):
309 309 '''convert a posix exit status into the same form returned by
310 310 os.spawnv
311 311
312 312 returns None if the process was stopped instead of exiting'''
313 313 if os.WIFEXITED(code):
314 314 return os.WEXITSTATUS(code)
315 315 elif os.WIFSIGNALED(code):
316 316 return -(os.WTERMSIG(code))
317 317
318 318
319 319 def _windowsworker(ui, func, staticargs, args, hasretval):
320 320 class Worker(threading.Thread):
321 321 def __init__(
322 322 self, taskqueue, resultqueue, func, staticargs, *args, **kwargs
323 323 ):
324 324 threading.Thread.__init__(self, *args, **kwargs)
325 325 self._taskqueue = taskqueue
326 326 self._resultqueue = resultqueue
327 327 self._func = func
328 328 self._staticargs = staticargs
329 329 self._interrupted = False
330 330 self.daemon = True
331 331 self.exception = None
332 332
333 333 def interrupt(self):
334 334 self._interrupted = True
335 335
336 336 def run(self):
337 337 try:
338 338 while not self._taskqueue.empty():
339 339 try:
340 340 args = self._taskqueue.get_nowait()
341 341 for res in self._func(*self._staticargs + (args,)):
342 342 self._resultqueue.put(res)
343 343 # threading doesn't provide a native way to
344 344 # interrupt execution. handle it manually at every
345 345 # iteration.
346 346 if self._interrupted:
347 347 return
348 348 except pycompat.queue.Empty:
349 349 break
350 350 except Exception as e:
351 351 # store the exception such that the main thread can resurface
352 352 # it as if the func was running without workers.
353 353 self.exception = e
354 354 raise
355 355
356 356 threads = []
357 357
358 358 def trykillworkers():
359 359 # Allow up to 1 second to clean worker threads nicely
360 360 cleanupend = time.time() + 1
361 361 for t in threads:
362 362 t.interrupt()
363 363 for t in threads:
364 364 remainingtime = cleanupend - time.time()
365 365 t.join(remainingtime)
366 366 if t.is_alive():
367 367 # pass over the workers joining failure. it is more
368 368 # important to surface the inital exception than the
369 369 # fact that one of workers may be processing a large
370 370 # task and does not get to handle the interruption.
371 371 ui.warn(
372 372 _(
373 373 b"failed to kill worker threads while "
374 374 b"handling an exception\n"
375 375 )
376 376 )
377 377 return
378 378
379 379 workers = _numworkers(ui)
380 380 resultqueue = pycompat.queue.Queue()
381 381 taskqueue = pycompat.queue.Queue()
382 382 retval = {}
383 383 # partition work to more pieces than workers to minimize the chance
384 384 # of uneven distribution of large tasks between the workers
385 385 for pargs in partition(args, workers * 20):
386 386 taskqueue.put(pargs)
387 387 for _i in range(workers):
388 388 t = Worker(taskqueue, resultqueue, func, staticargs)
389 389 threads.append(t)
390 390 t.start()
391 391 try:
392 392 while len(threads) > 0:
393 393 while not resultqueue.empty():
394 394 res = resultqueue.get()
395 395 if hasretval and res[0]:
396 396 retval.update(res[1])
397 397 else:
398 398 yield res
399 399 threads[0].join(0.05)
400 400 finishedthreads = [_t for _t in threads if not _t.is_alive()]
401 401 for t in finishedthreads:
402 402 if t.exception is not None:
403 403 raise t.exception
404 404 threads.remove(t)
405 405 except (Exception, KeyboardInterrupt): # re-raises
406 406 trykillworkers()
407 407 raise
408 408 while not resultqueue.empty():
409 409 res = resultqueue.get()
410 410 if hasretval and res[0]:
411 411 retval.update(res[1])
412 412 else:
413 413 yield res
414 414 if hasretval:
415 415 yield True, retval
416 416
417 417
418 418 if pycompat.iswindows:
419 419 _platformworker = _windowsworker
420 420 else:
421 421 _platformworker = _posixworker
422 422 _exitstatus = _posixexitstatus
423 423
424 424
425 425 def partition(lst, nslices):
426 426 '''partition a list into N slices of roughly equal size
427 427
428 428 The current strategy takes every Nth element from the input. If
429 429 we ever write workers that need to preserve grouping in input
430 430 we should consider allowing callers to specify a partition strategy.
431 431
432 432 mpm is not a fan of this partitioning strategy when files are involved.
433 433 In his words:
434 434
435 435 Single-threaded Mercurial makes a point of creating and visiting
436 436 files in a fixed order (alphabetical). When creating files in order,
437 437 a typical filesystem is likely to allocate them on nearby regions on
438 438 disk. Thus, when revisiting in the same order, locality is maximized
439 439 and various forms of OS and disk-level caching and read-ahead get a
440 440 chance to work.
441 441
442 442 This effect can be quite significant on spinning disks. I discovered it
443 443 circa Mercurial v0.4 when revlogs were named by hashes of filenames.
444 444 Tarring a repo and copying it to another disk effectively randomized
445 445 the revlog ordering on disk by sorting the revlogs by hash and suddenly
446 446 performance of my kernel checkout benchmark dropped by ~10x because the
447 447 "working set" of sectors visited no longer fit in the drive's cache and
448 448 the workload switched from streaming to random I/O.
449 449
450 450 What we should really be doing is have workers read filenames from an
451 451 ordered queue. This preserves locality and also keeps any worker from
452 452 getting more than one file out of balance.
453 453 '''
454 454 for i in range(nslices):
455 455 yield lst[i::nslices]
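# Worked example (added for illustration, not part of the module): the
# striding above sends every Nth element to the same slice.
#
#   list(partition([0, 1, 2, 3, 4, 5, 6], 3))
#   -> [[0, 3, 6], [1, 4], [2, 5]]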
@@ -1,165 +1,166 b''
1 1 Test UI worker interaction
2 2
3 3 $ cat > t.py <<EOF
4 4 > from __future__ import absolute_import, print_function
5 5 > import sys
6 6 > import time
7 7 > from mercurial import (
8 8 > error,
9 9 > registrar,
10 10 > ui as uimod,
11 11 > worker,
12 12 > )
13 13 > sys.unraisablehook = lambda x: None
14 14 > def abort(ui, args):
15 15 > if args[0] == 0:
16 16 > # by first worker for test stability
17 17 > raise error.Abort(b'known exception')
18 18 > return runme(ui, [])
19 19 > def exc(ui, args):
20 20 > if args[0] == 0:
21 21 > # by first worker for test stability
22 22 > raise Exception('unknown exception')
23 23 > return runme(ui, [])
24 24 > def runme(ui, args):
25 25 > for arg in args:
26 26 > ui.status(b'run\n')
27 27 > yield 1, arg
28 28 > time.sleep(0.1) # easier to trigger killworkers code path
29 29 > functable = {
30 30 > b'abort': abort,
31 31 > b'exc': exc,
32 32 > b'runme': runme,
33 33 > }
34 34 > cmdtable = {}
35 35 > command = registrar.command(cmdtable)
36 36 > @command(b'test', [], b'hg test [COST] [FUNC]')
37 37 > def t(ui, repo, cost=1.0, func=b'runme'):
38 38 > cost = float(cost)
39 39 > func = functable[func]
40 40 > ui.status(b'start\n')
41 41 > runs = worker.worker(ui, cost, func, (ui,), range(8))
42 42 > for n, i in runs:
43 43 > pass
44 44 > ui.status(b'done\n')
45 45 > EOF
46 46 $ abspath=`pwd`/t.py
47 47 $ hg init
48 48
49 49 Run tests with workers enabled by forcing a high cost
50 50
51 51 $ hg --config "extensions.t=$abspath" test 100000.0
52 52 start
53 53 run
54 54 run
55 55 run
56 56 run
57 57 run
58 58 run
59 59 run
60 60 run
61 61 done
62 62
63 63 Run tests without workers by forcing a low cost
64 64
65 65 $ hg --config "extensions.t=$abspath" test 0.0000001
66 66 start
67 67 run
68 68 run
69 69 run
70 70 run
71 71 run
72 72 run
73 73 run
74 74 run
75 75 done
76 76
77 77 #if no-windows
78 78
79 79 Known exception should be caught, but printed if --traceback is enabled
80 80
81 81 $ hg --config "extensions.t=$abspath" --config 'worker.numcpus=8' \
82 82 > test 100000.0 abort 2>&1
83 83 start
84 84 abort: known exception
85 85 [255]
86 86
87 87 $ hg --config "extensions.t=$abspath" --config 'worker.numcpus=8' \
88 > test 100000.0 abort --traceback 2>&1 | egrep '(SystemExit|Abort)'
88 > test 100000.0 abort --traceback 2>&1 | egrep '(WorkerError|Abort)'
89 89 raise error.Abort(b'known exception')
90 90 mercurial.error.Abort: known exception (py3 !)
91 91 Abort: known exception (no-py3 !)
92 SystemExit: 255
92 raise error.WorkerError(status)
93 mercurial.error.WorkerError: 255
93 94
94 95 Traceback must be printed for unknown exceptions
95 96
96 97 $ hg --config "extensions.t=$abspath" --config 'worker.numcpus=8' \
97 98 > test 100000.0 exc 2>&1 | grep '^Exception'
98 99 Exception: unknown exception
99 100
100 101 Workers should not do cleanups in all cases
101 102
102 103 $ cat > $TESTTMP/detectcleanup.py <<EOF
103 104 > from __future__ import absolute_import
104 105 > import atexit
105 106 > import os
106 107 > import sys
107 108 > import time
108 109 > sys.unraisablehook = lambda x: None
109 110 > oldfork = os.fork
110 111 > count = 0
111 112 > parentpid = os.getpid()
112 113 > def delayedfork():
113 114 > global count
114 115 > count += 1
115 116 > pid = oldfork()
116 117 > # make it easier to test SIGTERM hitting other workers when they have
117 118 > # not set up error handling yet.
118 119 > if count > 1 and pid == 0:
119 120 > time.sleep(0.1)
120 121 > return pid
121 122 > os.fork = delayedfork
122 123 > def cleanup():
123 124 > if os.getpid() != parentpid:
124 125 > os.write(1, 'should never happen\n')
125 126 > atexit.register(cleanup)
126 127 > EOF
127 128
128 129 $ hg --config "extensions.t=$abspath" --config worker.numcpus=8 --config \
129 130 > "extensions.d=$TESTTMP/detectcleanup.py" test 100000 abort
130 131 start
131 132 abort: known exception
132 133 [255]
133 134
134 135 Do not crash on partially read result
135 136
136 137 $ cat > $TESTTMP/detecttruncated.py <<EOF
137 138 > from __future__ import absolute_import
138 139 > import os
139 140 > import sys
140 141 > import time
141 142 > sys.unraisablehook = lambda x: None
142 143 > oldwrite = os.write
143 144 > def splitwrite(fd, string):
144 145 > ret = oldwrite(fd, string[:9])
145 146 > if ret == 9:
146 147 > time.sleep(0.1)
147 148 > ret += oldwrite(fd, string[9:])
148 149 > return ret
149 150 > os.write = splitwrite
150 151 > EOF
151 152
152 153 $ hg --config "extensions.t=$abspath" --config worker.numcpus=8 --config \
153 154 > "extensions.d=$TESTTMP/detecttruncated.py" test 100000.0
154 155 start
155 156 run
156 157 run
157 158 run
158 159 run
159 160 run
160 161 run
161 162 run
162 163 run
163 164 done
164 165
165 166 #endif