scmutil: add option to register summary callbacks as transaction validators...
Pulkit Goyal
r45032:13da36d7 default

# scmutil.py - Mercurial core utility functions
#
# Copyright Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import glob
import os
import posixpath
import re
import subprocess
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
    wdirid,
    wdirrev,
)
from .pycompat import getattr
from .thirdparty import attr
from . import (
    copies as copiesmod,
    encoding,
    error,
    match as matchmod,
    obsolete,
    obsutil,
    pathutil,
    phases,
    policy,
    pycompat,
    revsetlang,
    similar,
    smartset,
    url,
    util,
    vfs,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
)

if pycompat.iswindows:
    from . import scmwindows as scmplatform
else:
    from . import scmposix as scmplatform

parsers = policy.importmod('parsers')
rustrevlog = policy.importrust('revlog')

termsize = scmplatform.termsize


@attr.s(slots=True, repr=False)
class status(object):
    '''Struct with a list of files per status.

    The 'deleted', 'unknown' and 'ignored' properties are only
    relevant to the working copy.
    '''

    modified = attr.ib(default=attr.Factory(list))
    added = attr.ib(default=attr.Factory(list))
    removed = attr.ib(default=attr.Factory(list))
    deleted = attr.ib(default=attr.Factory(list))
    unknown = attr.ib(default=attr.Factory(list))
    ignored = attr.ib(default=attr.Factory(list))
    clean = attr.ib(default=attr.Factory(list))

    def __iter__(self):
        yield self.modified
        yield self.added
        yield self.removed
        yield self.deleted
        yield self.unknown
        yield self.ignored
        yield self.clean

    def __repr__(self):
        return (
            r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
            r'unknown=%s, ignored=%s, clean=%s>'
        ) % tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)

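# Illustrative sketch (not part of the original module): a ``status`` struct
# is just seven file lists, so callers unpack or iterate it directly::
#
#     st = repo.status()
#     for f in st.modified:
#         ui.write(b'M %s\n' % f)
#     # __iter__ yields the seven lists in declaration order:
#     modified, added, removed = list(st)[:3]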

def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = set()

    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    for subpath, ctx in sorted(pycompat.iteritems(subpaths)):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)


def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull. excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    if excluded:
        for n in excluded:
            ctx = repo[n]
            if ctx.phase() >= phases.secret and not ctx.extinct():
                secretlist.append(n)

    if secretlist:
        ui.status(
            _(b"no changes found (ignored %d secret changesets)\n")
            % len(secretlist)
        )
    else:
        ui.status(_(b"no changes found\n"))


def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.
    """
    try:
        try:
            return func()
        except:  # re-raises
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        if inst.errno == errno.ETIMEDOUT:
            reason = _(b'timed out waiting for lock held by %r') % (
                pycompat.bytestr(inst.locker)
            )
        else:
            reason = _(b'lock held by %r') % inst.locker
        ui.error(
            _(b"abort: %s: %s\n")
            % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
        )
        if not inst.locker:
            ui.error(_(b"(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        ui.error(
            _(b"abort: could not lock %s: %s\n")
            % (
                inst.desc or stringutil.forcebytestr(inst.filename),
                encoding.strtolocal(inst.strerror),
            )
        )
    except error.OutOfBandError as inst:
        if inst.args:
            msg = _(b"abort: remote error:\n")
        else:
            msg = _(b"abort: remote error\n")
        ui.error(msg)
        if inst.args:
            ui.error(b''.join(inst.args))
        if inst.hint:
            ui.error(b'(%s)\n' % inst.hint)
    except error.RepoError as inst:
        ui.error(_(b"abort: %s!\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.error(_(b"abort: %s") % inst.args[0])
        msg = inst.args[1]
        if isinstance(msg, type(u'')):
            msg = pycompat.sysbytes(msg)
        if not isinstance(msg, bytes):
            ui.error(b" %r\n" % (msg,))
        elif not msg:
            ui.error(_(b" empty string\n"))
        else:
            ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
    except error.CensoredNodeError as inst:
        ui.error(_(b"abort: file censored %s!\n") % inst)
    except error.StorageError as inst:
        ui.error(_(b"abort: %s!\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
    except error.InterventionRequired as inst:
        ui.error(b"%s\n" % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
        return 1
    except error.WdirUnsupported:
        ui.error(_(b"abort: working directory revision cannot be specified\n"))
    except error.Abort as inst:
        ui.error(_(b"abort: %s\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
    except ImportError as inst:
        ui.error(_(b"abort: %s!\n") % stringutil.forcebytestr(inst))
        m = stringutil.forcebytestr(inst).split()[-1]
        if m in b"mpatch bdiff".split():
            ui.error(_(b"(did you forget to compile extensions?)\n"))
        elif m in b"zlib".split():
            ui.error(_(b"(is your Python install correct?)\n"))
    except (IOError, OSError) as inst:
        if util.safehasattr(inst, b"code"):  # HTTPError
            ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
        elif util.safehasattr(inst, b"reason"):  # URLError or SSLError
            try:  # usually it is in the form (errno, strerror)
                reason = inst.reason.args[1]
            except (AttributeError, IndexError):
                # it might be anything, for example a string
                reason = inst.reason
            if isinstance(reason, pycompat.unicode):
                # SSLError of Python 2.7.9 contains a unicode
                reason = encoding.unitolocal(reason)
            ui.error(_(b"abort: error: %s\n") % stringutil.forcebytestr(reason))
        elif (
            util.safehasattr(inst, b"args")
            and inst.args
            and inst.args[0] == errno.EPIPE
        ):
            pass
        elif getattr(inst, "strerror", None):  # common IOError or OSError
            if getattr(inst, "filename", None) is not None:
                ui.error(
                    _(b"abort: %s: '%s'\n")
                    % (
                        encoding.strtolocal(inst.strerror),
                        stringutil.forcebytestr(inst.filename),
                    )
                )
            else:
                ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
        else:  # suspicious IOError
            raise
    except MemoryError:
        ui.error(_(b"abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case catch this and pass exit code to caller.
        return inst.code

    return -1

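# Illustrative sketch (not part of the original module): callcatch() wraps a
# whole command invocation and turns handled exceptions into exit codes;
# ``runcommand`` below is a hypothetical stand-in for the real entry point::
#
#     def _run():
#         return runcommand(ui, repo, args)
#
#     ret = callcatch(ui, _run)  # _run()'s result, or -1 on a handled abort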

def checknewlabel(repo, lbl, kind):
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in [b'tip', b'.', b'null']:
        raise error.Abort(_(b"the name '%s' is reserved") % lbl)
    for c in (b':', b'\0', b'\n', b'\r'):
        if c in lbl:
            raise error.Abort(
                _(b"%r cannot be used in a name") % pycompat.bytestr(c)
            )
    try:
        int(lbl)
        raise error.Abort(_(b"cannot use an integer as a name"))
    except ValueError:
        pass
    if lbl.strip() != lbl:
        raise error.Abort(_(b"leading or trailing whitespace in name %r") % lbl)


def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if b'\r' in f or b'\n' in f:
        raise error.Abort(
            _(b"'\\n' and '\\r' disallowed in filenames: %r")
            % pycompat.bytestr(f)
        )


def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if abort or warn:
        msg = util.checkwinfilename(f)
        if msg:
            msg = b"%s: %s" % (msg, procutil.shellquote(f))
            if abort:
                raise error.Abort(msg)
            ui.warn(_(b"warning: %s\n") % msg)


def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config(b'ui', b'portablefilenames')
    lval = val.lower()
    bval = stringutil.parsebool(val)
    abort = pycompat.iswindows or lval == b'abort'
    warn = bval or lval == b'warn'
    if bval is None and not (warn or abort or lval == b'ignore'):
        raise error.ConfigError(
            _(b"ui.portablefilenames value is invalid ('%s')") % val
        )
    return abort, warn


class casecollisionauditor(object):
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        allfiles = b'\0'.join(dirstate)
        self._loweredfiles = set(encoding.lower(allfiles).split(b'\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _(b'possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_(b"warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)


def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    key = None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if revs:
        s = hashutil.sha1()
        for rev in revs:
            s.update(b'%d;' % rev)
        key = s.digest()
    return key

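# Illustrative sketch (not part of the original module): cache code stores
# this digest next to (tiprev, tipnode) and compares it on load;
# ``rebuildcache`` is a hypothetical stand-in::
#
#     wanted = filteredhash(repo, tiprev)  # None when nothing is filtered
#     if cachedhash != wanted:
#         rebuildcache(repo)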

def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''

    def errhandler(err):
        if err.filename == path:
            raise err

    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:

        def adddir(dirlst, dirname):
            dirstat = os.stat(dirname)
            match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
            if not match:
                dirlst.append(dirstat)
            return not match

    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if b'.hg' in dirs:
            yield root  # found a repository
            qroot = os.path.join(root, b'.hg', b'patches')
            if os.path.isdir(os.path.join(qroot, b'.hg')):
                yield qroot  # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove(b'.hg')
            else:
                dirs[:] = []  # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs


def binnode(ctx):
    """Return binary node id for a given basectx"""
    node = ctx.node()
    if node is None:
        return wdirid
    return node


def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    rev = ctx.rev()
    if rev is None:
        return wdirrev
    return rev


def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    repo = ctx.repo()
    return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))


def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    if ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    return b'%d:%s' % (rev, hexfunc(node))


def resolvehexnodeidprefix(repo, prefix):
    if prefix.startswith(b'x') and repo.ui.configbool(
        b'experimental', b'revisions.prefixhexnode'
    ):
        prefix = prefix[1:]
    try:
        # Uses unfiltered repo because it's faster when prefix is ambiguous.
        # This matches the shortesthexnodeidprefix() function below.
        node = repo.unfiltered().changelog._partialmatch(prefix)
    except error.AmbiguousPrefixLookupError:
        revset = repo.ui.config(
            b'experimental', b'revisions.disambiguatewithin'
        )
        if revset:
            # Clear config to avoid infinite recursion
            configoverrides = {
                (b'experimental', b'revisions.disambiguatewithin'): None
            }
            with repo.ui.configoverride(configoverrides):
                revs = repo.anyrevs([revset], user=True)
                matches = []
                for rev in revs:
                    node = repo.changelog.node(rev)
                    if hex(node).startswith(prefix):
                        matches.append(node)
                if len(matches) == 1:
                    return matches[0]
        raise
    if node is None:
        return
    repo.changelog.rev(node)  # make sure node isn't filtered
    return node


def mayberevnum(repo, prefix):
    """Checks if the given prefix may be mistaken for a revision number"""
    try:
        i = int(prefix)
        # if we are a pure int, then starting with zero will not be
        # confused as a rev; or, obviously, if the int is larger
        # than the value of the tip rev. We still need to disambiguate if
        # prefix == '0', since that *is* a valid revnum.
        if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
            return False
        return True
    except ValueError:
        return False

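# Illustrative examples (not part of the original module), assuming a repo
# with 100 revisions::
#
#     mayberevnum(repo, b'42')    # True: plausible revision number
#     mayberevnum(repo, b'0')     # True: '0' *is* a valid revnum
#     mayberevnum(repo, b'042')   # False: a leading zero never parses as a rev
#     mayberevnum(repo, b'4200')  # False: larger than the tip rev
#     mayberevnum(repo, b'abc')   # False: not an integer at all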

def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
    """Find the shortest unambiguous prefix that matches hexnode.

    If "cache" is not None, it must be a dictionary that can be used for
    caching between calls to this method.
    """
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. So we look for hash collisions in
    # unfiltered space, which means some hashes may be slightly longer.

    minlength = max(minlength, 1)

    def disambiguate(prefix):
        """Disambiguate against revnums."""
        if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'):
            if mayberevnum(repo, prefix):
                return b'x' + prefix
            else:
                return prefix

        hexnode = hex(node)
        for length in range(len(prefix), len(hexnode) + 1):
            prefix = hexnode[:length]
            if not mayberevnum(repo, prefix):
                return prefix

    cl = repo.unfiltered().changelog
    revset = repo.ui.config(b'experimental', b'revisions.disambiguatewithin')
    if revset:
        revs = None
        if cache is not None:
            revs = cache.get(b'disambiguationrevset')
        if revs is None:
            revs = repo.anyrevs([revset], user=True)
            if cache is not None:
                cache[b'disambiguationrevset'] = revs
        if cl.rev(node) in revs:
            hexnode = hex(node)
            nodetree = None
            if cache is not None:
                nodetree = cache.get(b'disambiguationnodetree')
            if not nodetree:
                if util.safehasattr(parsers, 'nodetree'):
                    # The CExt is the only implementation to provide a nodetree
                    # class so far.
                    index = cl.index
                    if util.safehasattr(index, 'get_cindex'):
                        # the rust wrapper needs to give access to its internal index
                        index = index.get_cindex()
                    nodetree = parsers.nodetree(index, len(revs))
                    for r in revs:
                        nodetree.insert(r)
                    if cache is not None:
                        cache[b'disambiguationnodetree'] = nodetree
            if nodetree is not None:
                length = max(nodetree.shortest(node), minlength)
                prefix = hexnode[:length]
                return disambiguate(prefix)
            for length in range(minlength, len(hexnode) + 1):
                matches = []
                prefix = hexnode[:length]
                for rev in revs:
                    otherhexnode = repo[rev].hex()
                    if prefix == otherhexnode[:length]:
                        matches.append(otherhexnode)
                if len(matches) == 1:
                    return disambiguate(prefix)

    try:
        return disambiguate(cl.shortest(node, minlength))
    except error.LookupError:
        raise error.RepoLookupError()

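# Illustrative sketch (not part of the original module): callers shortening
# many nodes share one cache dict so the disambiguation revset and nodetree
# are only computed once::
#
#     cache = {}
#     for node in nodes:
#         prefix = shortesthexnodeidprefix(repo, node, minlength=4, cache=cache)
#         ui.write(b'%s\n' % prefix)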

def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
    symbol is an ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
        return True
    except error.RepoLookupError:
        return False


def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        msg = (
            b"symbol (%s of type %s) was not a string, did you mean "
            b"repo[symbol]?" % (symbol, type(symbol))
        )
        raise error.ProgrammingError(msg)
    try:
        if symbol in (b'.', b'tip', b'null'):
            return repo[symbol]

        try:
            r = int(symbol)
            if b'%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        if len(symbol) == 40:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_(b"unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        return repo[None]
    except (
        error.FilteredIndexError,
        error.FilteredLookupError,
        error.FilteredRepoLookupError,
    ):
        raise _filterederror(repo, symbol)

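# Illustrative examples (not part of the original module): revsymbol()
# resolves plain symbols only; anything needing revset evaluation must go
# through revrange()/revsingle() instead::
#
#     revsymbol(repo, b'tip')            # ok: special name
#     revsymbol(repo, b'my-bookmark')    # ok: resolved via the name interface
#     revsymbol(repo, b'max(public())')  # RepoLookupError: not a plain symbol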

def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith(b'visible'):

        # Check if the changeset is obsolete
        unfilteredrepo = repo.unfiltered()
        ctx = revsymbol(unfilteredrepo, changeid)

        # If the changeset is obsolete, enrich the message with the reason
        # that made this changeset not visible
        if ctx.obsolete():
            msg = obsutil._getfilteredreason(repo, changeid, ctx)
        else:
            msg = _(b"hidden revision '%s'") % changeid

        hint = _(b'use --hidden to access hidden revisions')

        return error.FilteredRepoLookupError(msg, hint=hint)
    msg = _(b"filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)


def revsingle(repo, revspec, default=b'.', localalias=None):
    if not revspec and revspec != 0:
        return repo[default]

    l = revrange(repo, [revspec], localalias=localalias)
    if not l:
        raise error.Abort(_(b'empty revision set'))
    return repo[l.last()]


def _pairspec(revspec):
    tree = revsetlang.parse(revspec)
    return tree and tree[0] in (
        b'range',
        b'rangepre',
        b'rangepost',
        b'rangeall',
    )


def revpair(repo, revs):
    if not revs:
        return repo[b'.'], repo[None]

    l = revrange(repo, revs)

    if not l:
        raise error.Abort(_(b'empty revision range'))

    first = l.first()
    second = l.last()

    if (
        first == second
        and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)
    ):
        raise error.Abort(_(b'empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]


def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``smartset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    allspecs = []
    for spec in specs:
        if isinstance(spec, int):
            spec = revsetlang.formatspec(b'%d', spec)
        allspecs.append(spec)
    return repo.anyrevs(allspecs, user=True, localalias=localalias)

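# Illustrative examples (not part of the original module): specs are OR-ed
# together, ints are accepted directly, and user-supplied values should be
# escaped with revsetlang.formatspec() first::
#
#     revs = revrange(repo, [b'::.', 5])                     # '::.' or rev 5
#     spec = revsetlang.formatspec(b'branch(%s)', somename)  # safe quoting
#     revs = revrange(repo, [spec])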

def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        return [parents[0], repo[nullrev]]
    if parents[0].rev() >= intrev(ctx) - 1:
        return []
    return parents


def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
    """Return a function that produces paths for presenting to the user.

    The returned function takes a repo-relative path and produces a path
    that can be presented in the UI.

    Depending on the value of ui.relative-paths, either a repo-relative or
    cwd-relative path will be produced.

    legacyrelativevalue is the value to use if ui.relative-paths=legacy

    If forcerelativevalue is not None, then that value will be used regardless
    of what ui.relative-paths is set to.
    """
    if forcerelativevalue is not None:
        relative = forcerelativevalue
    else:
        config = repo.ui.config(b'ui', b'relative-paths')
        if config == b'legacy':
            relative = legacyrelativevalue
        else:
            relative = stringutil.parsebool(config)
            if relative is None:
                raise error.ConfigError(
                    _(b"ui.relative-paths is not a boolean ('%s')") % config
                )

    if relative:
        cwd = repo.getcwd()
        pathto = repo.pathto
        return lambda f: pathto(f, cwd)
    elif repo.ui.configbool(b'ui', b'slash'):
        return lambda f: f
    else:
        return util.localpath

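# Illustrative sketch (not part of the original module): commands resolve the
# path function once and reuse it for every file they print::
#
#     uipathfn = getuipathfn(repo, legacyrelativevalue=True)
#     for f in files:
#         ui.write(b'%s\n' % uipathfn(f))  # cwd-relative or repo-relative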

def subdiruipathfn(subpath, uipathfn):
    '''Create a new uipathfn that treats the file as relative to subpath.'''
    return lambda f: uipathfn(posixpath.join(subpath, f))


def anypats(pats, opts):
    '''Checks if any patterns, including --include and --exclude were given.

    Some commands (e.g. addremove) use this condition for deciding whether to
    print absolute or relative paths.
    '''
    return bool(pats or opts.get(b'include') or opts.get(b'exclude'))


def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it has already been done by the shell.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            try:
                globbed = glob.glob(pat)
            except re.error:
                globbed = [pat]
            if globbed:
                ret.extend(globbed)
                continue
        ret.append(kindpat)
    return ret


def matchandpats(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if opts is None:
        opts = {}
    if not globbed and default == b'relpath':
        pats = expandpats(pats or [])

    uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)

    def bad(f, msg):
        ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(
        pats,
        opts.get(b'include'),
        opts.get(b'exclude'),
        default,
        listsubrepos=opts.get(b'subrepos'),
        badfn=badfn,
    )

    if m.always():
        pats = []
    return m, pats


def match(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    '''Return a matcher that will warn about bad matches.'''
    return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]


def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always()


def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(files, badfn=badfn)


def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.
    """
    if not matchmod.patkind(pat):
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    else:
        ctx = repo[rev]
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
        files = [f for f in ctx if m(f)]
        if len(files) != 1:
            raise error.ParseError(msg)
        return files[0]


def getorigvfs(ui, repo):
    """return a vfs suitable to save 'orig' file

    return None if no special directory is configured"""
    origbackuppath = ui.config(b'ui', b'origbackuppath')
    if not origbackuppath:
        return None
    return vfs.vfs(repo.wvfs.join(origbackuppath))


def backuppath(ui, repo, filepath):
    '''customize where working copy backup files (.orig files) are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified

    filepath is repo-relative

    Returns an absolute path
    '''
    origvfs = getorigvfs(ui, repo)
    if origvfs is None:
        return repo.wjoin(filepath + b".orig")

    origbackupdir = origvfs.dirname(filepath)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        for f in reversed(list(pathutil.finddirs(filepath))):
            if origvfs.isfileorlink(f):
                ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    if origvfs.isdir(filepath) and not origvfs.islink(filepath):
        ui.note(
            _(b'removing conflicting directory: %s\n') % origvfs.join(filepath)
        )
        origvfs.rmtree(filepath, forcibly=True)

    return origvfs.join(filepath)

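# Illustrative example (not part of the original module): with
#
#     [ui]
#     origbackuppath = .hg/origbackups
#
# in the config, backuppath(ui, repo, b'foo/bar') returns an absolute path
# under .hg/origbackups/foo/ instead of the default foo/bar.orig.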

class _containsnode(object):
    """proxy __contains__(node) to container.__contains__ which accepts revs"""

    def __init__(self, repo, revcontainer):
        self._torev = repo.changelog.rev
        self._revcontains = revcontainer.__contains__

    def __contains__(self, node):
        return self._revcontains(self._torev(node))


def cleanupnodes(
    repo,
    replacements,
    operation,
    moves=None,
    metadata=None,
    fixphase=False,
    targetphase=None,
    backup=True,
):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is a dictionary containing metadata to be stored in obsmarker if
    obsolescence is enabled.
    """
    assert fixphase or targetphase is None
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, b'items'):
        replacements = {(n,): () for n in replacements}
    else:
        # upgrading non-tuple "source" to tuple ones for BC
        repls = {}
        for key, value in replacements.items():
            if not isinstance(key, tuple):
                key = (key,)
            repls[key] = value
        replacements = repls

    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    for oldnodes, newnodes in replacements.items():
        for oldnode in oldnodes:
            if oldnode in moves:
                continue
            if len(newnodes) > 1:
                # usually a split, take the one with biggest rev number
                newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
            elif len(newnodes) == 0:
                # move bookmark backwards
                allreplaced = []
                for rep in replacements:
                    allreplaced.extend(rep)
                roots = list(
                    unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
                )
                if roots:
                    newnode = roots[0].node()
                else:
                    newnode = nullid
            else:
                newnode = newnodes[0]
            moves[oldnode] = newnode

    allnewnodes = [n for ns in replacements.values() for n in ns]
    toretract = {}
    toadvance = {}
    if fixphase:
        precursors = {}
        for oldnodes, newnodes in replacements.items():
            for oldnode in oldnodes:
                for newnode in newnodes:
                    precursors.setdefault(newnode, []).append(oldnode)

        allnewnodes.sort(key=lambda n: unfi[n].rev())
        newphases = {}

        def phase(ctx):
            return newphases.get(ctx.node(), ctx.phase())

        for newnode in allnewnodes:
            ctx = unfi[newnode]
            parentphase = max(phase(p) for p in ctx.parents())
            if targetphase is None:
                oldphase = max(
                    unfi[oldnode].phase() for oldnode in precursors[newnode]
                )
                newphase = max(oldphase, parentphase)
            else:
                newphase = max(targetphase, parentphase)
            newphases[newnode] = newphase
            if newphase > ctx.phase():
                toretract.setdefault(newphase, []).append(newnode)
            elif newphase < ctx.phase():
                toadvance.setdefault(newphase, []).append(newnode)

    with repo.transaction(b'cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks  # avoid import cycle

            repo.ui.debug(
                b'moving bookmarks %r from %s to %s\n'
                % (
                    pycompat.rapply(pycompat.maybebytestr, oldbmarks),
                    hex(oldnode),
                    hex(newnode),
                )
            )
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs(
                b'parents(roots(%ln & (::%n))) - parents(%n)',
                allnewnodes,
                newnode,
                oldnode,
            )
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        for phase, nodes in toretract.items():
            phases.retractboundary(repo, tr, phase, nodes)
        for phase, nodes in toadvance.items():
            phases.advanceboundary(repo, tr, phase, nodes)

        mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obsolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the node in topology order, that might be useful for
            # some obsstore logic.
            # NOTE: the sorting might belong to createmarkers.
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0][0])
            rels = []
            for ns, s in sorted(replacements.items(), key=sortfunc):
                rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
                rels.append(rel)
            if rels:
                obsolete.createmarkers(
                    repo, rels, operation=operation, metadata=metadata
                )
        elif phases.supportinternal(repo) and mayusearchived:
            # this assumes we do not have "unstable" nodes above the cleaned ones
            allreplaced = set()
            for ns in replacements.keys():
                allreplaced.update(ns)
            if backup:
                from . import repair  # avoid import cycle

                node = min(allreplaced, key=repo.changelog.rev)
                repair.backupbundle(
                    repo, allreplaced, allreplaced, node, operation
                )
            phases.retractboundary(repo, tr, phases.archived, allreplaced)
        else:
            from . import repair  # avoid import cycle

            tostrip = list(n for ns in replacements for n in ns)
            if tostrip:
                repair.delayedstrip(
                    repo.ui, repo, tostrip, operation, backup=backup
                )

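# Illustrative sketch (not part of the original module): a history-rewriting
# command typically ends with a single cleanupnodes() call describing every
# rewrite, e.g. after amending oldnode into newnode::
#
#     cleanupnodes(repo, {oldnode: [newnode]}, b'amend', fixphase=True)
#
# or, for nodes that are simply discarded with no successors::
#
#     cleanupnodes(repo, discardednodes, b'prune')  # iterable form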

def addremove(repo, matcher, prefix, uipathfn, opts=None):
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get(b'dry_run')
    try:
        similarity = float(opts.get(b'similarity') or 0)
    except ValueError:
        raise error.Abort(_(b'similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.Abort(_(b'similarity must be between 0 and 100'))
    similarity /= 100.0

    ret = 0

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = subdiruipathfn(subpath, uipathfn)
            try:
                if sub.addremove(submatch, subprefix, subuipathfn, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )

    rejected = []

    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(
        repo, badmatch
    )

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _(b'adding %s\n') % uipathfn(abs)
                label = b'ui.addremove.added'
            else:
                status = _(b'removing %s\n') % uipathfn(abs)
                label = b'ui.addremove.removed'
            repo.ui.status(status, label=label)

    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret


def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. Files are relative to
    the repo root.'''
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _(b'adding %s\n') % abs
            else:
                status = _(b'removing %s\n') % abs
            repo.ui.status(status)

    # TODO: We should probably have the caller pass in uipathfn and apply it to
    # the messages above too. legacyrelativevalue=True is consistent with how
    # it used to work.
    uipathfn = getuipathfn(repo, legacyrelativevalue=True)
    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0


def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    matcher = repo.narrowmatch(matcher, includeexact=True)
    walkresults = dirstate.walk(
        matcher,
        subrepos=sorted(ctx.substate),
        unknown=True,
        ignored=False,
        full=False,
    )
    for abs, st in pycompat.iteritems(walkresults):
        dstate = dirstate[abs]
        if dstate == b'?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != b'r' and not st:
            deleted.append(abs)
        elif dstate == b'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == b'r' and not st:
            removed.append(abs)
        elif dstate == b'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten


def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(
            repo, added, removed, similarity
        ):
            if (
                repo.ui.verbose
                or not matcher.exact(old)
                or not matcher.exact(new)
            ):
                repo.ui.status(
                    _(
                        b'recording removal of %s as rename to %s '
                        b'(%d%% similar)\n'
                    )
                    % (uipathfn(old), uipathfn(new), score * 100)
                )
            renames[new] = old
    return renames


def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in pycompat.iteritems(renames):
            wctx.copy(old, new)

def getrenamedfn(repo, endrev=None):
    if copiesmod.usechangesetcentricalgo(repo):

        def getrenamed(fn, rev):
            ctx = repo[rev]
            p1copies = ctx.p1copies()
            if fn in p1copies:
                return p1copies[fn]
            p2copies = ctx.p2copies()
            if fn in p2copies:
                return p2copies[fn]
            return None

        return getrenamed

    rcache = {}
    if endrev is None:
        endrev = len(repo)

    def getrenamed(fn, rev):
        '''looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev.'''
        if fn not in rcache:
            rcache[fn] = {}
            fl = repo.file(fn)
            for i in fl:
                lr = fl.linkrev(i)
                renamed = fl.renamed(fl.node(i))
                rcache[fn][lr] = renamed and renamed[0]
                if lr >= endrev:
                    break
        if rev in rcache[fn]:
            return rcache[fn][rev]

        # If linkrev != rev (i.e. rev not found in rcache) fallback to
        # filectx logic.
        try:
            return repo[rev][fn].copysource()
        except error.LookupError:
            return None

    return getrenamed


def getcopiesfn(repo, endrev=None):
    if copiesmod.usechangesetcentricalgo(repo):

        def copiesfn(ctx):
            if ctx.p2copies():
                allcopies = ctx.p1copies().copy()
                # There should be no overlap
                allcopies.update(ctx.p2copies())
                return sorted(allcopies.items())
            else:
                return sorted(ctx.p1copies().items())

    else:
        getrenamed = getrenamedfn(repo, endrev)

        def copiesfn(ctx):
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename))
            return copies

    return copiesfn


def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc:  # copying back a copy?
        if repo.dirstate[dst] not in b'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == b'a' and origsrc == src:
            if not ui.quiet:
                ui.warn(
                    _(
                        b"%s has not been committed yet, so no copy "
                        b"data will be stored for %s.\n"
                    )
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
                )
            if repo.dirstate[dst] in b'?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)


def movedirstate(repo, newctx, match=None):
    """Move the dirstate to newctx and adjust it as necessary.

    A matcher can be provided as an optimization. It is probably a bug to pass
    a matcher that doesn't match all the differences between the parent of the
    working copy and newctx.
    """
    oldctx = repo[b'.']
    ds = repo.dirstate
    copies = dict(ds.copies())
    ds.setparents(newctx.node(), nullid)
    s = newctx.status(oldctx, match=match)
    for f in s.modified:
        if ds[f] == b'r':
            # modified + removed -> removed
            continue
        ds.normallookup(f)

    for f in s.added:
        if ds[f] == b'r':
            # added + removed -> unknown
            ds.drop(f)
        elif ds[f] != b'a':
            ds.add(f)

    for f in s.removed:
        if ds[f] == b'a':
            # removed + added -> normal
            ds.normallookup(f)
        elif ds[f] != b'r':
            ds.remove(f)

    # Merge old parent and old working dir copies
    oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
    oldcopies.update(copies)
    copies = {
        dst: oldcopies.get(src, src)
        for dst, src in pycompat.iteritems(oldcopies)
    }
    # Adjust the dirstate copies
    for dst, src in pycompat.iteritems(copies):
        if src not in newctx or dst in newctx or ds[dst] != b'a':
            src = None
        ds.copy(src, dst)
    repo._quick_access_changeid_invalidate()


def writerequires(opener, requirements):
    with opener(b'requires', b'w', atomictemp=True) as fp:
        for r in sorted(requirements):
            fp.write(b"%s\n" % r)

class filecachesubentry(object):
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

            # check again
            if not self._cacheable:
                return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise


class filecacheentry(object):
    def __init__(self, paths, stat=True):
        self._entries = []
        for path in paths:
            self._entries.append(filecachesubentry(path, stat))

    def changed(self):
        '''true if any entry has changed'''
        for entry in self._entries:
            if entry.changed():
                return True
        return False

    def refresh(self):
        for entry in self._entries:
            entry.refresh()


class filecache(object):
    """A property like decorator that tracks files under .hg/ for updates.

    On first access, the files defined as arguments are stat()ed and the
    results cached. The decorated function is called. The results are stashed
    away in a ``_filecache`` dict on the object whose method is decorated.

    On subsequent access, the cached result is used as it is set to the
    instance dictionary.

    On external property set/delete operations, the caller must update the
    corresponding _filecache entry appropriately. Use __class__.<attr>.set()
    instead of directly setting <attr>.

    When using the property API, the cached data is always used if available.
    No stat() is performed to check if the file has changed.

    Others can muck about with the state of the ``_filecache`` dict. e.g. they
    can populate an entry before the property's getter is called. In this case,
    entries in ``_filecache`` will be used during property operations,
    if available. If the underlying file changes, it is up to external callers
    to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
    method result as well as possibly calling ``del obj._filecache[attr]`` to
    remove the ``filecacheentry``.
    """

    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class whose member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        self.func = func
        self.sname = func.__name__
        self.name = pycompat.sysbytes(self.sname)
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self

        assert self.sname not in obj.__dict__

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modifies the file between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.sname] = entry.obj
        return entry.obj

    # don't implement __set__(), which would make __dict__ lookup as slow as
    # function call.

    def set(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value  # update cached copy
        obj.__dict__[self.sname] = value  # update copy returned by obj.x

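# Illustrative sketch (not part of the original module): users subclass
# filecache so join() resolves names against the right vfs; ``readbookmarks``
# is a hypothetical stand-in::
#
#     class repofilecache(filecache):
#         def join(self, obj, fname):
#             return obj.vfs.join(fname)  # obj is the repo-like instance
#
#     class fakerepo(object):
#         _filecache = {}  # required by the descriptor protocol above
#         vfs = vfs.vfs(b'/path/to/repo/.hg')
#
#         @repofilecache(b'bookmarks')
#         def bookmarks(self):
#             return readbookmarks(self)  # re-run only when the file changed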
1637 1637
1638 1638 def extdatasource(repo, source):
1639 1639 """Gather a map of rev -> value dict from the specified source
1640 1640
1641 1641 A source spec is treated as a URL, with a special case shell: type
1642 1642 for parsing the output from a shell command.
1643 1643
1644 1644 The data is parsed as a series of newline-separated records where
1645 1645 each record is a revision specifier optionally followed by a space
1646 1646 and a freeform string value. If the revision is known locally, it
1647 1647 is converted to a rev, otherwise the record is skipped.
1648 1648
1649 1649 Note that both key and value are treated as UTF-8 and converted to
1650 1650 the local encoding. This allows uniformity between local and
1651 1651 remote data sources.
1652 1652 """
1653 1653
1654 1654 spec = repo.ui.config(b"extdata", source)
1655 1655 if not spec:
1656 1656 raise error.Abort(_(b"unknown extdata source '%s'") % source)
1657 1657
1658 1658 data = {}
1659 1659 src = proc = None
1660 1660 try:
1661 1661 if spec.startswith(b"shell:"):
1662 1662 # external commands should be run relative to the repo root
1663 1663 cmd = spec[6:]
1664 1664 proc = subprocess.Popen(
1665 1665 procutil.tonativestr(cmd),
1666 1666 shell=True,
1667 1667 bufsize=-1,
1668 1668 close_fds=procutil.closefds,
1669 1669 stdout=subprocess.PIPE,
1670 1670 cwd=procutil.tonativestr(repo.root),
1671 1671 )
1672 1672 src = proc.stdout
1673 1673 else:
1674 1674 # treat as a URL or file
1675 1675 src = url.open(repo.ui, spec)
1676 1676 for l in src:
1677 1677 if b" " in l:
1678 1678 k, v = l.strip().split(b" ", 1)
1679 1679 else:
1680 1680 k, v = l.strip(), b""
1681 1681
1682 1682 k = encoding.tolocal(k)
1683 1683 try:
1684 1684 data[revsingle(repo, k).rev()] = encoding.tolocal(v)
1685 1685 except (error.LookupError, error.RepoLookupError):
1686 1686 pass # we ignore data for nodes that don't exist locally
1687 1687 finally:
1688 1688 if proc:
1689 1689 try:
1690 1690 proc.communicate()
1691 1691 except ValueError:
1692 1692 # This happens if we started iterating src and then
1693 1693 # got a parse error on a line. It should be safe to ignore.
1694 1694 pass
1695 1695 if src:
1696 1696 src.close()
1697 1697 if proc and proc.returncode != 0:
1698 1698 raise error.Abort(
1699 1699 _(b"extdata command '%s' failed: %s")
1700 1700 % (cmd, procutil.explainexit(proc.returncode))
1701 1701 )
1702 1702
1703 1703 return data
1704 1704
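For context, a hedged sketch of how extdatasource is driven from
configuration; the ``bugdata`` source name and the command are invented, but
the ``shell:`` prefix and the record format follow the docstring above.

    def extdata_example(repo):
        # assumes an hgrc section like:
        #   [extdata]
        #   bugdata = shell:cat .hg/bugdata.txt
        # where each output line is "<revspec> <freeform value>"
        return extdatasource(repo, b'bugdata')  # -> {rev: b'value', ...}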
1705 1705
1706 1706 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1707 1707 if lock is None:
1708 1708 raise error.LockInheritanceContractViolation(
1709 1709 b'lock can only be inherited while held'
1710 1710 )
1711 1711 if environ is None:
1712 1712 environ = {}
1713 1713 with lock.inherit() as locker:
1714 1714 environ[envvar] = locker
1715 1715 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1716 1716
1717 1717
1718 1718 def wlocksub(repo, cmd, *args, **kwargs):
1719 1719 """run cmd as a subprocess that allows inheriting repo's wlock
1720 1720
1721 1721 This can only be called while the wlock is held. This takes all the
1722 1722 arguments that ui.system does, and returns the exit code of the
1723 1723 subprocess."""
1724 1724 return _locksub(
1725 1725 repo, repo.currentwlock(), b'HG_WLOCK_LOCKER', cmd, *args, **kwargs
1726 1726 )
1727 1727
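An illustrative caller (the command is a placeholder): this only works while
the wlock is already held, and the child process can rejoin the lock through
the HG_WLOCK_LOCKER environment variable set by _locksub.

    def wlocksub_example(repo):
        with repo.wlock():
            # returns the exit code of the subprocess
            return wlocksub(repo, b'hg debuglocks')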
1728 1728
1729 1729 class progress(object):
1730 1730 def __init__(self, ui, updatebar, topic, unit=b"", total=None):
1731 1731 self.ui = ui
1732 1732 self.pos = 0
1733 1733 self.topic = topic
1734 1734 self.unit = unit
1735 1735 self.total = total
1736 1736 self.debug = ui.configbool(b'progress', b'debug')
1737 1737 self._updatebar = updatebar
1738 1738
1739 1739 def __enter__(self):
1740 1740 return self
1741 1741
1742 1742 def __exit__(self, exc_type, exc_value, exc_tb):
1743 1743 self.complete()
1744 1744
1745 1745 def update(self, pos, item=b"", total=None):
1746 1746 assert pos is not None
1747 1747 if total:
1748 1748 self.total = total
1749 1749 self.pos = pos
1750 1750 self._updatebar(self.topic, self.pos, item, self.unit, self.total)
1751 1751 if self.debug:
1752 1752 self._printdebug(item)
1753 1753
1754 1754 def increment(self, step=1, item=b"", total=None):
1755 1755 self.update(self.pos + step, item, total)
1756 1756
1757 1757 def complete(self):
1758 1758 self.pos = None
1759 1759 self.unit = b""
1760 1760 self.total = None
1761 1761 self._updatebar(self.topic, self.pos, b"", self.unit, self.total)
1762 1762
1763 1763 def _printdebug(self, item):
1764 1764 unit = b''
1765 1765 if self.unit:
1766 1766 unit = b' ' + self.unit
1767 1767 if item:
1768 1768 item = b' ' + item
1769 1769
1770 1770 if self.total:
1771 1771 pct = 100.0 * self.pos / self.total
1772 1772 self.ui.debug(
1773 1773 b'%s:%s %d/%d%s (%4.2f%%)\n'
1774 1774 % (self.topic, item, self.pos, self.total, unit, pct)
1775 1775 )
1776 1776 else:
1777 1777 self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit))
1778 1778
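For reference, instances of this class are normally obtained through
ui.makeprogress() rather than constructed directly; a sketch with a made-up
topic and item list:

    def progress_example(ui, items):
        with ui.makeprogress(b'scanning', unit=b'files',
                             total=len(items)) as p:
            for i, item in enumerate(items, 1):
                p.update(i, item=item)  # also emits debug output if enabled
        # __exit__ calls complete(), which clears the progress bar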
1779 1779
1780 1780 def gdinitconfig(ui):
1781 1781 """helper function to know if a repo should be created as general delta
1782 1782 """
1783 1783 # experimental config: format.generaldelta
1784 1784 return ui.configbool(b'format', b'generaldelta') or ui.configbool(
1785 1785 b'format', b'usegeneraldelta'
1786 1786 )
1787 1787
1788 1788
1789 1789 def gddeltaconfig(ui):
1790 1790 """helper function to know if incoming delta should be optimised
1791 1791 """
1792 1792 # experimental config: format.generaldelta
1793 1793 return ui.configbool(b'format', b'generaldelta')
1794 1794
1795 1795
1796 1796 class simplekeyvaluefile(object):
1797 1797 """A simple file with key=value lines
1798 1798
1799 1799 Keys must be alphanumeric and start with a letter; values must not
1800 1800 contain '\n' characters"""
1801 1801
1802 1802 firstlinekey = b'__firstline'
1803 1803
1804 1804 def __init__(self, vfs, path, keys=None):
1805 1805 self.vfs = vfs
1806 1806 self.path = path
1807 1807
1808 1808 def read(self, firstlinenonkeyval=False):
1809 1809 """Read the contents of a simple key-value file
1810 1810
1811 1811 'firstlinenonkeyval' indicates whether the first line of the file should
1812 1812 be treated as a key-value pair or returned whole under the
1813 1813 __firstline key."""
1814 1814 lines = self.vfs.readlines(self.path)
1815 1815 d = {}
1816 1816 if firstlinenonkeyval:
1817 1817 if not lines:
1818 1818 e = _(b"empty simplekeyvalue file")
1819 1819 raise error.CorruptedState(e)
1820 1820 # we don't want to include '\n' in the __firstline
1821 1821 d[self.firstlinekey] = lines[0][:-1]
1822 1822 del lines[0]
1823 1823
1824 1824 try:
1825 1825 # the 'if line.strip()' part prevents us from failing on empty
1826 1826 # lines which only contain '\n' and therefore are not skipped
1827 1827 # by 'if line'
1828 1828 updatedict = dict(
1829 1829 line[:-1].split(b'=', 1) for line in lines if line.strip()
1830 1830 )
1831 1831 if self.firstlinekey in updatedict:
1832 1832 e = _(b"%r can't be used as a key")
1833 1833 raise error.CorruptedState(e % self.firstlinekey)
1834 1834 d.update(updatedict)
1835 1835 except ValueError as e:
1836 1836 raise error.CorruptedState(stringutil.forcebytestr(e))
1837 1837 return d
1838 1838
1839 1839 def write(self, data, firstline=None):
1840 1840 """Write key=>value mapping to a file
1841 1841 data is a dict. Keys must be alphanumeric and start with a letter.
1842 1842 Values must not contain newline characters.
1843 1843
1844 1844 If 'firstline' is not None, it is written to the file before
1845 1845 everything else, verbatim, not in key=value form"""
1846 1846 lines = []
1847 1847 if firstline is not None:
1848 1848 lines.append(b'%s\n' % firstline)
1849 1849
1850 1850 for k, v in data.items():
1851 1851 if k == self.firstlinekey:
1852 1852 e = b"key name '%s' is reserved" % self.firstlinekey
1853 1853 raise error.ProgrammingError(e)
1854 1854 if not k[0:1].isalpha():
1855 1855 e = b"keys must start with a letter in a key-value file"
1856 1856 raise error.ProgrammingError(e)
1857 1857 if not k.isalnum():
1858 1858 e = b"invalid key name in a simple key-value file"
1859 1859 raise error.ProgrammingError(e)
1860 1860 if b'\n' in v:
1861 1861 e = b"invalid value in a simple key-value file"
1862 1862 raise error.ProgrammingError(e)
1863 1863 lines.append(b"%s=%s\n" % (k, v))
1864 1864 with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp:
1865 1865 fp.write(b''.join(lines))
1866 1866
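A round-trip sketch of the class above (file name, keys, and values are
invented):

    def skvf_example(vfs):
        f = simplekeyvaluefile(vfs, b'examplestate')
        f.write({b'version': b'1', b'state': b'pending'},
                firstline=b'header')
        data = f.read(firstlinenonkeyval=True)
        # data == {b'__firstline': b'header',
        #          b'version': b'1', b'state': b'pending'}
        return data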
1867 1867
1868 1868 _reportobsoletedsource = [
1869 1869 b'debugobsolete',
1870 1870 b'pull',
1871 1871 b'push',
1872 1872 b'serve',
1873 1873 b'unbundle',
1874 1874 ]
1875 1875
1876 1876 _reportnewcssource = [
1877 1877 b'pull',
1878 1878 b'unbundle',
1879 1879 ]
1880 1880
1881 1881
1882 1882 def prefetchfiles(repo, revs, match):
1883 1883 """Invokes the registered file prefetch functions, allowing extensions to
1884 1884 ensure the corresponding files are available locally, before the command
1885 1885 uses them."""
1886 1886 if match:
1887 1887 # The command itself will complain about files that don't exist, so
1888 1888 # don't duplicate the message.
1889 1889 match = matchmod.badmatch(match, lambda fn, msg: None)
1890 1890 else:
1891 1891 match = matchall(repo)
1892 1892
1893 1893 fileprefetchhooks(repo, revs, match)
1894 1894
1895 1895
1896 1896 # a list of (repo, revs, match) prefetch functions
1897 1897 fileprefetchhooks = util.hooks()
1898 1898
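How an extension would hook in, as a sketch (the extension name and body are
placeholders; each registered callable is invoked with (repo, revs, match)):

    def _prefetch(repo, revs, match):
        # an extension would fetch file contents for `revs`/`match` here,
        # so later command code can read them without extra round trips
        pass

    # registered from an extension's setup code, e.g.:
    #   scmutil.fileprefetchhooks.add(b'myextension', _prefetch)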
1899 1899 # A marker that tells the evolve extension to suppress its own reporting
1900 1900 _reportstroubledchangesets = True
1901 1901
1902 1902
1903 def registersummarycallback(repo, otr, txnname=b''):
1903 def registersummarycallback(repo, otr, txnname=b'', as_validator=False):
1904 1904 """register a callback to issue a summary after the transaction is closed
1905
1906 If as_validator is true, the callbacks are registered as transaction
1907 validators instead of post-close callbacks.
1905 1908 """
1906 1909
1907 1910 def txmatch(sources):
1908 1911 return any(txnname.startswith(source) for source in sources)
1909 1912
1910 1913 categories = []
1911 1914
1912 1915 def reportsummary(func):
1913 1916 """decorator for report callbacks."""
1914 1917 # The repoview life cycle is shorter than the one of the actual
1915 1918 # underlying repository. So the filtered object can die before the
1916 1919 # weakref is used leading to troubles. We keep a reference to the
1917 1920 # unfiltered object and restore the filtering when retrieving the
1918 1921 # repository through the weakref.
1919 1922 filtername = repo.filtername
1920 1923 reporef = weakref.ref(repo.unfiltered())
1921 1924
1922 1925 def wrapped(tr):
1923 1926 repo = reporef()
1924 1927 if filtername:
1925 1928 assert repo is not None # help pytype
1926 1929 repo = repo.filtered(filtername)
1927 1930 func(repo, tr)
1928 1931
1929 1932 newcat = b'%02i-txnreport' % len(categories)
1930 otr.addpostclose(newcat, wrapped)
1933 if as_validator:
1934 otr.addvalidator(newcat, wrapped)
1935 else:
1936 otr.addpostclose(newcat, wrapped)
1931 1937 categories.append(newcat)
1932 1938 return wrapped
1933 1939
1934 1940 @reportsummary
1935 1941 def reportchangegroup(repo, tr):
1936 1942 cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
1937 1943 cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
1938 1944 cgfiles = tr.changes.get(b'changegroup-count-files', 0)
1939 1945 cgheads = tr.changes.get(b'changegroup-count-heads', 0)
1940 1946 if cgchangesets or cgrevisions or cgfiles:
1941 1947 htext = b""
1942 1948 if cgheads:
1943 1949 htext = _(b" (%+d heads)") % cgheads
1944 1950 msg = _(b"added %d changesets with %d changes to %d files%s\n")
1951 if as_validator:
1952 msg = _(b"adding %d changesets with %d changes to %d files%s\n")
1945 1953 assert repo is not None # help pytype
1946 1954 repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))
1947 1955
1948 1956 if txmatch(_reportobsoletedsource):
1949 1957
1950 1958 @reportsummary
1951 1959 def reportobsoleted(repo, tr):
1952 1960 obsoleted = obsutil.getobsoleted(repo, tr)
1953 1961 newmarkers = len(tr.changes.get(b'obsmarkers', ()))
1954 1962 if newmarkers:
1955 1963 repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
1956 1964 if obsoleted:
1957 repo.ui.status(_(b'obsoleted %i changesets\n') % len(obsoleted))
1965 msg = _(b'obsoleted %i changesets\n')
1966 if as_validator:
1967 msg = _(b'obsoleting %i changesets\n')
1968 repo.ui.status(msg % len(obsoleted))
1958 1969
1959 1970 if obsolete.isenabled(
1960 1971 repo, obsolete.createmarkersopt
1961 1972 ) and repo.ui.configbool(
1962 1973 b'experimental', b'evolution.report-instabilities'
1963 1974 ):
1964 1975 instabilitytypes = [
1965 1976 (b'orphan', b'orphan'),
1966 1977 (b'phase-divergent', b'phasedivergent'),
1967 1978 (b'content-divergent', b'contentdivergent'),
1968 1979 ]
1969 1980
1970 1981 def getinstabilitycounts(repo):
1971 1982 filtered = repo.changelog.filteredrevs
1972 1983 counts = {}
1973 1984 for instability, revset in instabilitytypes:
1974 1985 counts[instability] = len(
1975 1986 set(obsolete.getrevs(repo, revset)) - filtered
1976 1987 )
1977 1988 return counts
1978 1989
1979 1990 oldinstabilitycounts = getinstabilitycounts(repo)
1980 1991
1981 1992 @reportsummary
1982 1993 def reportnewinstabilities(repo, tr):
1983 1994 newinstabilitycounts = getinstabilitycounts(repo)
1984 1995 for instability, revset in instabilitytypes:
1985 1996 delta = (
1986 1997 newinstabilitycounts[instability]
1987 1998 - oldinstabilitycounts[instability]
1988 1999 )
1989 2000 msg = getinstabilitymessage(delta, instability)
1990 2001 if msg:
1991 2002 repo.ui.warn(msg)
1992 2003
1993 2004 if txmatch(_reportnewcssource):
1994 2005
1995 2006 @reportsummary
1996 2007 def reportnewcs(repo, tr):
1997 2008 """Report the range of new revisions pulled/unbundled."""
1998 2009 origrepolen = tr.changes.get(b'origrepolen', len(repo))
1999 2010 unfi = repo.unfiltered()
2000 2011 if origrepolen >= len(unfi):
2001 2012 return
2002 2013
2003 2014 # Compute the bounds of new visible revisions' range.
2004 2015 revs = smartset.spanset(repo, start=origrepolen)
2005 2016 if revs:
2006 2017 minrev, maxrev = repo[revs.min()], repo[revs.max()]
2007 2018
2008 2019 if minrev == maxrev:
2009 2020 revrange = minrev
2010 2021 else:
2011 2022 revrange = b'%s:%s' % (minrev, maxrev)
2012 2023 draft = len(repo.revs(b'%ld and draft()', revs))
2013 2024 secret = len(repo.revs(b'%ld and secret()', revs))
2014 2025 if not (draft or secret):
2015 2026 msg = _(b'new changesets %s\n') % revrange
2016 2027 elif draft and secret:
2017 2028 msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
2018 2029 msg %= (revrange, draft, secret)
2019 2030 elif draft:
2020 2031 msg = _(b'new changesets %s (%d drafts)\n')
2021 2032 msg %= (revrange, draft)
2022 2033 elif secret:
2023 2034 msg = _(b'new changesets %s (%d secrets)\n')
2024 2035 msg %= (revrange, secret)
2025 2036 else:
2026 2037 errormsg = b'entered unreachable condition'
2027 2038 raise error.ProgrammingError(errormsg)
2028 2039 repo.ui.status(msg)
2029 2040
2030 2041 # search new changesets directly pulled as obsolete
2031 2042 duplicates = tr.changes.get(b'revduplicates', ())
2032 2043 obsadded = unfi.revs(
2033 2044 b'(%d: + %ld) and obsolete()', origrepolen, duplicates
2034 2045 )
2035 2046 cl = repo.changelog
2036 2047 extinctadded = [r for r in obsadded if r not in cl]
2037 2048 if extinctadded:
2038 2049 # They are not just obsolete, but obsolete and invisible
2039 2050 # we call them "extinct" internally but the term has not been
2040 2051 # exposed to users.
2041 2052 msg = b'(%d other changesets obsolete on arrival)\n'
2042 2053 repo.ui.status(msg % len(extinctadded))
2043 2054
2044 2055 @reportsummary
2045 2056 def reportphasechanges(repo, tr):
2046 2057 """Report statistics of phase changes for changesets pre-existing
2047 2058 pull/unbundle.
2048 2059 """
2049 2060 origrepolen = tr.changes.get(b'origrepolen', len(repo))
2050 2061 phasetracking = tr.changes.get(b'phases', {})
2051 2062 if not phasetracking:
2052 2063 return
2053 2064 published = [
2054 2065 rev
2055 2066 for rev, (old, new) in pycompat.iteritems(phasetracking)
2056 2067 if new == phases.public and rev < origrepolen
2057 2068 ]
2058 2069 if not published:
2059 2070 return
2060 repo.ui.status(
2061 _(b'%d local changesets published\n') % len(published)
2062 )
2071 msg = _(b'%d local changesets published\n')
2072 if as_validator:
2073 msg = _(b'%d local changesets will be published\n')
2074 repo.ui.status(msg % len(published))
2063 2075
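Putting the new flag together, a sketch of the two registration modes (the
transaction plumbing is elided; the txnname value matches the sources listed
above):

    def summary_example(repo, tr, validate=False):
        if validate:
            # run the callbacks before the transaction commits; messages
            # switch to future tense, e.g. "adding N changesets ..." and
            # "... will be published"
            registersummarycallback(repo, tr, b'unbundle', as_validator=True)
        else:
            # classic behaviour: report after the transaction closes
            registersummarycallback(repo, tr, b'unbundle')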
2064 2076
2065 2077 def getinstabilitymessage(delta, instability):
2066 2078 """function to return the message to show warning about new instabilities
2067 2079
2068 2080 exists as a separate function so that extension can wrap to show more
2069 2081 information like how to fix instabilities"""
2070 2082 if delta > 0:
2071 2083 return _(b'%i new %s changesets\n') % (delta, instability)
2072 2084
2073 2085
2074 2086 def nodesummaries(repo, nodes, maxnumnodes=4):
2075 2087 if len(nodes) <= maxnumnodes or repo.ui.verbose:
2076 2088 return b' '.join(short(h) for h in nodes)
2077 2089 first = b' '.join(short(h) for h in nodes[:maxnumnodes])
2078 2090 return _(b"%s and %d others") % (first, len(nodes) - maxnumnodes)
2079 2091
2080 2092
2081 2093 def enforcesinglehead(repo, tr, desc, accountclosed=False):
2082 2094 """check that no named branch has multiple heads"""
2083 2095 if desc in (b'strip', b'repair'):
2084 2096 # skip the logic during strip
2085 2097 return
2086 2098 visible = repo.filtered(b'visible')
2087 2099 # possible improvement: we could restrict the check to the affected branches
2088 2100 bm = visible.branchmap()
2089 2101 for name in bm:
2090 2102 heads = bm.branchheads(name, closed=accountclosed)
2091 2103 if len(heads) > 1:
2092 2104 msg = _(b'rejecting multiple heads on branch "%s"')
2093 2105 msg %= name
2094 2106 hint = _(b'%d heads: %s')
2095 2107 hint %= (len(heads), nodesummaries(repo, heads))
2096 2108 raise error.Abort(msg, hint=hint)
2097 2109
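A hedged sketch of invoking the check from transaction-close code (the desc
value is illustrative):

    def singlehead_example(repo, tr):
        # aborts if any named branch would end up with more than one open
        # head; b'strip'/b'repair' descriptions skip the check entirely
        enforcesinglehead(repo, tr, b'commit', accountclosed=False)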
2098 2110
2099 2111 def wrapconvertsink(sink):
2100 2112 """Allow extensions to wrap the sink returned by convcmd.convertsink()
2101 2113 before it is used, whether or not the convert extension was formally loaded.
2102 2114 """
2103 2115 return sink
2104 2116
2105 2117
2106 2118 def unhidehashlikerevs(repo, specs, hiddentype):
2107 2119 """parse the user specs and unhide changesets whose hash or revision number
2108 2120 is passed.
2109 2121
2110 2122 hiddentype can be: 1) 'warn': warn while unhiding changesets
2111 2123 2) 'nowarn': don't warn while unhiding changesets
2112 2124
2113 2125 returns a repo object with the required changesets unhidden
2114 2126 """
2115 2127 if not repo.filtername or not repo.ui.configbool(
2116 2128 b'experimental', b'directaccess'
2117 2129 ):
2118 2130 return repo
2119 2131
2120 2132 if repo.filtername not in (b'visible', b'visible-hidden'):
2121 2133 return repo
2122 2134
2123 2135 symbols = set()
2124 2136 for spec in specs:
2125 2137 try:
2126 2138 tree = revsetlang.parse(spec)
2127 2139 except error.ParseError: # will be reported by scmutil.revrange()
2128 2140 continue
2129 2141
2130 2142 symbols.update(revsetlang.gethashlikesymbols(tree))
2131 2143
2132 2144 if not symbols:
2133 2145 return repo
2134 2146
2135 2147 revs = _getrevsfromsymbols(repo, symbols)
2136 2148
2137 2149 if not revs:
2138 2150 return repo
2139 2151
2140 2152 if hiddentype == b'warn':
2141 2153 unfi = repo.unfiltered()
2142 2154 revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in revs])
2143 2155 repo.ui.warn(
2144 2156 _(
2145 2157 b"warning: accessing hidden changesets for write "
2146 2158 b"operation: %s\n"
2147 2159 )
2148 2160 % revstr
2149 2161 )
2150 2162
2151 2163 # we have to use a new filtername to separate branch/tags caches until
2152 2164 # we can disable these caches when revisions are dynamically pinned.
2153 2165 return repo.filtered(b'visible-hidden', revs)
2154 2166
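For illustration, how a write command might use this (requires the
experimental.directaccess config; specs are user-supplied revision strings):

    def directaccess_example(repo, specs):
        # returns a repo view in which hidden changesets named by hash in
        # `specs` are visible, warning about each one it unhides
        repo = unhidehashlikerevs(repo, specs, b'warn')
        return revrange(repo, specs)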
2155 2167
2156 2168 def _getrevsfromsymbols(repo, symbols):
2157 2169 """parse the list of symbols and returns a set of revision numbers of hidden
2158 2170 changesets present in symbols"""
2159 2171 revs = set()
2160 2172 unfi = repo.unfiltered()
2161 2173 unficl = unfi.changelog
2162 2174 cl = repo.changelog
2163 2175 tiprev = len(unficl)
2164 2176 allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
2165 2177 for s in symbols:
2166 2178 try:
2167 2179 n = int(s)
2168 2180 if n <= tiprev:
2169 2181 if not allowrevnums:
2170 2182 continue
2171 2183 else:
2172 2184 if n not in cl:
2173 2185 revs.add(n)
2174 2186 continue
2175 2187 except ValueError:
2176 2188 pass
2177 2189
2178 2190 try:
2179 2191 s = resolvehexnodeidprefix(unfi, s)
2180 2192 except (error.LookupError, error.WdirUnsupported):
2181 2193 s = None
2182 2194
2183 2195 if s is not None:
2184 2196 rev = unficl.rev(s)
2185 2197 if rev not in cl:
2186 2198 revs.add(rev)
2187 2199
2188 2200 return revs
2189 2201
2190 2202
2191 2203 def bookmarkrevs(repo, mark):
2192 2204 """
2193 2205 Select revisions reachable by a given bookmark
2194 2206 """
2195 2207 return repo.revs(
2196 2208 b"ancestors(bookmark(%s)) - "
2197 2209 b"ancestors(head() and not bookmark(%s)) - "
2198 2210 b"ancestors(bookmark() and not bookmark(%s))",
2199 2211 mark,
2200 2212 mark,
2201 2213 mark,
2202 2214 )
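Finally, a usage sketch for the revset above (the bookmark name is made up):

    def bookmarkrevs_example(repo):
        # the changesets "owned" by the bookmark: its ancestors, minus
        # anything also reachable from other heads or bookmarks
        return bookmarkrevs(repo, b'feature')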