scmutil: fix an unbound variable with progressbar debug enabled...
Matt Harbison - r44520:089255b1 default
# scmutil.py - Mercurial core utility functions
#
# Copyright Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import glob
import os
import posixpath
import re
import subprocess
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
    wdirid,
    wdirrev,
)
from .pycompat import getattr
from .thirdparty import attr
from . import (
    copies as copiesmod,
    encoding,
    error,
    match as matchmod,
    obsolete,
    obsutil,
    pathutil,
    phases,
    policy,
    pycompat,
    revsetlang,
    similar,
    smartset,
    url,
    util,
    vfs,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
)

if pycompat.iswindows:
    from . import scmwindows as scmplatform
else:
    from . import scmposix as scmplatform

parsers = policy.importmod('parsers')
rustrevlog = policy.importrust('revlog')

termsize = scmplatform.termsize


@attr.s(slots=True, repr=False)
class status(object):
    '''Struct with a list of files per status.

    The 'deleted', 'unknown' and 'ignored' properties are only
    relevant to the working copy.
    '''

    modified = attr.ib(default=attr.Factory(list))
    added = attr.ib(default=attr.Factory(list))
    removed = attr.ib(default=attr.Factory(list))
    deleted = attr.ib(default=attr.Factory(list))
    unknown = attr.ib(default=attr.Factory(list))
    ignored = attr.ib(default=attr.Factory(list))
    clean = attr.ib(default=attr.Factory(list))

    def __iter__(self):
        yield self.modified
        yield self.added
        yield self.removed
        yield self.deleted
        yield self.unknown
        yield self.ignored
        yield self.clean

    def __repr__(self):
        return (
            r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
            r'unknown=%s, ignored=%s, clean=%s>'
        ) % tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)


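# Illustrative sketch, not part of the original module: a `status` instance
# unpacks in the field order yielded by __iter__ above, so callers can
# destructure it directly. The `st` argument is an assumption standing in
# for e.g. the result of ctx2.status(ctx1).
def _example_summarize_status(st):
    modified, added, removed, deleted, unknown, ignored, clean = st
    return b'%d modified, %d added, %d removed' % (
        len(modified),
        len(added),
        len(removed),
    )

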
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = set()

    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    for subpath, ctx in sorted(pycompat.iteritems(subpaths)):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)


def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    if excluded:
        for n in excluded:
            ctx = repo[n]
            if ctx.phase() >= phases.secret and not ctx.extinct():
                secretlist.append(n)

    if secretlist:
        ui.status(
            _(b"no changes found (ignored %d secret changesets)\n")
            % len(secretlist)
        )
    else:
        ui.status(_(b"no changes found\n"))


def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.
    """
    try:
        try:
            return func()
        except:  # re-raises
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        if inst.errno == errno.ETIMEDOUT:
            reason = _(b'timed out waiting for lock held by %r') % (
                pycompat.bytestr(inst.locker)
            )
        else:
            reason = _(b'lock held by %r') % inst.locker
        ui.error(
            _(b"abort: %s: %s\n")
            % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
        )
        if not inst.locker:
            ui.error(_(b"(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        ui.error(
            _(b"abort: could not lock %s: %s\n")
            % (
                inst.desc or stringutil.forcebytestr(inst.filename),
                encoding.strtolocal(inst.strerror),
            )
        )
    except error.OutOfBandError as inst:
        if inst.args:
            msg = _(b"abort: remote error:\n")
        else:
            msg = _(b"abort: remote error\n")
        ui.error(msg)
        if inst.args:
            ui.error(b''.join(inst.args))
        if inst.hint:
            ui.error(b'(%s)\n' % inst.hint)
    except error.RepoError as inst:
        ui.error(_(b"abort: %s!\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.error(_(b"abort: %s") % inst.args[0])
        msg = inst.args[1]
        if isinstance(msg, type(u'')):
            msg = pycompat.sysbytes(msg)
        if not isinstance(msg, bytes):
            ui.error(b" %r\n" % (msg,))
        elif not msg:
            ui.error(_(b" empty string\n"))
        else:
            ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
    except error.CensoredNodeError as inst:
        ui.error(_(b"abort: file censored %s!\n") % inst)
    except error.StorageError as inst:
        ui.error(_(b"abort: %s!\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
    except error.InterventionRequired as inst:
        ui.error(b"%s\n" % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
        return 1
    except error.WdirUnsupported:
        ui.error(_(b"abort: working directory revision cannot be specified\n"))
    except error.Abort as inst:
        ui.error(_(b"abort: %s\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
    except ImportError as inst:
        ui.error(_(b"abort: %s!\n") % stringutil.forcebytestr(inst))
        m = stringutil.forcebytestr(inst).split()[-1]
        if m in b"mpatch bdiff".split():
            ui.error(_(b"(did you forget to compile extensions?)\n"))
        elif m in b"zlib".split():
            ui.error(_(b"(is your Python install correct?)\n"))
    except (IOError, OSError) as inst:
        if util.safehasattr(inst, b"code"):  # HTTPError
            ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
        elif util.safehasattr(inst, b"reason"):  # URLError or SSLError
            try:  # usually it is in the form (errno, strerror)
                reason = inst.reason.args[1]
            except (AttributeError, IndexError):
                # it might be anything, for example a string
                reason = inst.reason
            if isinstance(reason, pycompat.unicode):
                # SSLError of Python 2.7.9 contains a unicode
                reason = encoding.unitolocal(reason)
            ui.error(_(b"abort: error: %s\n") % stringutil.forcebytestr(reason))
        elif (
            util.safehasattr(inst, b"args")
            and inst.args
            and inst.args[0] == errno.EPIPE
        ):
            pass
        elif getattr(inst, "strerror", None):  # common IOError or OSError
            if getattr(inst, "filename", None) is not None:
                ui.error(
                    _(b"abort: %s: '%s'\n")
                    % (
                        encoding.strtolocal(inst.strerror),
                        stringutil.forcebytestr(inst.filename),
                    )
                )
            else:
                ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
        else:  # suspicious IOError
            raise
    except MemoryError:
        ui.error(_(b"abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case, catch this and pass the exit code to the caller.
        return inst.code

    return -1


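# Illustrative sketch, not part of the original module: callcatch() is meant
# to wrap a command entry point. The `ui` and `run` names are assumptions for
# a real ui object and a zero-argument command function.
def _example_dispatch(ui, run):
    # Returns run()'s result on success; for an exception handled above, an
    # "abort: ..." style message has been printed and an exit code is
    # returned instead.
    return callcatch(ui, run)

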
def checknewlabel(repo, lbl, kind):
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in [b'tip', b'.', b'null']:
        raise error.Abort(_(b"the name '%s' is reserved") % lbl)
    for c in (b':', b'\0', b'\n', b'\r'):
        if c in lbl:
            raise error.Abort(
                _(b"%r cannot be used in a name") % pycompat.bytestr(c)
            )
    try:
        int(lbl)
        raise error.Abort(_(b"cannot use an integer as a name"))
    except ValueError:
        pass
    if lbl.strip() != lbl:
        raise error.Abort(_(b"leading or trailing whitespace in name %r") % lbl)


def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if b'\r' in f or b'\n' in f:
        raise error.Abort(
            _(b"'\\n' and '\\r' disallowed in filenames: %r")
            % pycompat.bytestr(f)
        )


def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if abort or warn:
        msg = util.checkwinfilename(f)
        if msg:
            msg = b"%s: %s" % (msg, procutil.shellquote(f))
            if abort:
                raise error.Abort(msg)
            ui.warn(_(b"warning: %s\n") % msg)


def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config(b'ui', b'portablefilenames')
    lval = val.lower()
    bval = stringutil.parsebool(val)
    abort = pycompat.iswindows or lval == b'abort'
    warn = bval or lval == b'warn'
    if bval is None and not (warn or abort or lval == b'ignore'):
        raise error.ConfigError(
            _(b"ui.portablefilenames value is invalid ('%s')") % val
        )
    return abort, warn


class casecollisionauditor(object):
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        allfiles = b'\0'.join(dirstate)
        self._loweredfiles = set(encoding.lower(allfiles).split(b'\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _(b'possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_(b"warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)


def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    key = None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if revs:
        s = hashutil.sha1()
        for rev in revs:
            s.update(b'%d;' % rev)
        key = s.digest()
    return key


def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''

    def errhandler(err):
        if err.filename == path:
            raise err

    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:

        def adddir(dirlst, dirname):
            dirstat = os.stat(dirname)
            match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
            if not match:
                dirlst.append(dirstat)
            return not match

    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if b'.hg' in dirs:
            yield root  # found a repository
            qroot = os.path.join(root, b'.hg', b'patches')
            if os.path.isdir(os.path.join(qroot, b'.hg')):
                yield qroot  # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove(b'.hg')
            else:
                dirs[:] = []  # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs


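# Illustrative sketch, not part of the original module: enumerate every
# repository under a directory, following symlinks. The byte path below is an
# assumption.
def _example_listrepos(root=b'/srv/repos'):
    # walkrepos() yields the root of each repository found, plus any patch
    # queue repository nested under <repo>/.hg/patches.
    return list(walkrepos(root, followsym=True))

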
def binnode(ctx):
    """Return binary node id for a given basectx"""
    node = ctx.node()
    if node is None:
        return wdirid
    return node


def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    rev = ctx.rev()
    if rev is None:
        return wdirrev
    return rev


def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    repo = ctx.repo()
    return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))


def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    if ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    return b'%d:%s' % (rev, hexfunc(node))


def resolvehexnodeidprefix(repo, prefix):
    if prefix.startswith(b'x') and repo.ui.configbool(
        b'experimental', b'revisions.prefixhexnode'
    ):
        prefix = prefix[1:]
    try:
        # Uses unfiltered repo because it's faster when prefix is ambiguous.
        # This matches the shortesthexnodeidprefix() function below.
        node = repo.unfiltered().changelog._partialmatch(prefix)
    except error.AmbiguousPrefixLookupError:
        revset = repo.ui.config(
            b'experimental', b'revisions.disambiguatewithin'
        )
        if revset:
            # Clear config to avoid infinite recursion
            configoverrides = {
                (b'experimental', b'revisions.disambiguatewithin'): None
            }
            with repo.ui.configoverride(configoverrides):
                revs = repo.anyrevs([revset], user=True)
                matches = []
                for rev in revs:
                    node = repo.changelog.node(rev)
                    if hex(node).startswith(prefix):
                        matches.append(node)
                if len(matches) == 1:
                    return matches[0]
        raise
    if node is None:
        return
    repo.changelog.rev(node)  # make sure node isn't filtered
    return node


def mayberevnum(repo, prefix):
    """Checks if the given prefix may be mistaken for a revision number"""
    try:
        i = int(prefix)
        # if we are a pure int, then starting with zero will not be
        # confused as a rev; or, obviously, if the int is larger
        # than the value of the tip rev. We still need to disambiguate if
        # prefix == '0', since that *is* a valid revnum.
        if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
            return False
        return True
    except ValueError:
        return False


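# Illustrative sketch, not part of the original module: mayberevnum() feeds
# the prefix disambiguation below. b'0' is always a valid revnum, b'01' never
# is (revnums are not written with leading zeroes), and b'1234' is one only
# if the repo has more than 1234 revisions.
def _example_prefix_ambiguity(repo):
    return [mayberevnum(repo, p) for p in (b'0', b'01', b'1234')]

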
def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
    """Find the shortest unambiguous prefix that matches hexnode.

    If "cache" is not None, it must be a dictionary that can be used for
    caching between calls to this method.
    """
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. So we look for hash collisions in
    # unfiltered space, which means some hashes may be slightly longer.

    minlength = max(minlength, 1)

    def disambiguate(prefix):
        """Disambiguate against revnums."""
        if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'):
            if mayberevnum(repo, prefix):
                return b'x' + prefix
            else:
                return prefix

        hexnode = hex(node)
        for length in range(len(prefix), len(hexnode) + 1):
            prefix = hexnode[:length]
            if not mayberevnum(repo, prefix):
                return prefix

    cl = repo.unfiltered().changelog
    revset = repo.ui.config(b'experimental', b'revisions.disambiguatewithin')
    if revset:
        revs = None
        if cache is not None:
            revs = cache.get(b'disambiguationrevset')
        if revs is None:
            revs = repo.anyrevs([revset], user=True)
            if cache is not None:
                cache[b'disambiguationrevset'] = revs
        if cl.rev(node) in revs:
            hexnode = hex(node)
            nodetree = None
            if cache is not None:
                nodetree = cache.get(b'disambiguationnodetree')
            if not nodetree:
                if util.safehasattr(parsers, 'nodetree'):
                    # The CExt is the only implementation to provide a nodetree
                    # class so far.
                    index = cl.index
                    if util.safehasattr(index, 'get_cindex'):
                        # the rust wrapper needs to give access to its internal index
                        index = index.get_cindex()
                    nodetree = parsers.nodetree(index, len(revs))
                    for r in revs:
                        nodetree.insert(r)
                    if cache is not None:
                        cache[b'disambiguationnodetree'] = nodetree
            if nodetree is not None:
                length = max(nodetree.shortest(node), minlength)
                prefix = hexnode[:length]
                return disambiguate(prefix)
        for length in range(minlength, len(hexnode) + 1):
            matches = []
            prefix = hexnode[:length]
            for rev in revs:
                otherhexnode = repo[rev].hex()
                if prefix == otherhexnode[:length]:
                    matches.append(otherhexnode)
            if len(matches) == 1:
                return disambiguate(prefix)

    try:
        return disambiguate(cl.shortest(node, minlength))
    except error.LookupError:
        raise error.RepoLookupError()


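# Illustrative sketch, not part of the original module: when shortening many
# nodes in a row (e.g. for a log template), sharing one cache dict lets the
# disambiguation revset and nodetree above be computed only once.
def _example_shortest_prefixes(repo, nodes):
    cache = {}
    return [
        shortesthexnodeidprefix(repo, n, minlength=4, cache=cache)
        for n in nodes
    ]

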
def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
    symbol is an ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
        return True
    except error.RepoLookupError:
        return False


def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        msg = (
            b"symbol (%s of type %s) was not a string, did you mean "
            b"repo[symbol]?" % (symbol, type(symbol))
        )
        raise error.ProgrammingError(msg)
    try:
        if symbol in (b'.', b'tip', b'null'):
            return repo[symbol]

        try:
            r = int(symbol)
            if b'%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        if len(symbol) == 40:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_(b"unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        return repo[None]
    except (
        error.FilteredIndexError,
        error.FilteredLookupError,
        error.FilteredRepoLookupError,
    ):
        raise _filterederror(repo, symbol)


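# Illustrative sketch, not part of the original module: revsymbol() resolves
# exactly one symbol (bookmark, tag, hash prefix, revnum); revset expressions
# must go through revrange()/revsingle() instead. The bookmark name is an
# assumption.
def _example_bookmark_ctx(repo):
    if isrevsymbol(repo, b'my-bookmark'):
        return revsymbol(repo, b'my-bookmark')
    return None

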
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith(b'visible'):

        # Check if the changeset is obsolete
        unfilteredrepo = repo.unfiltered()
        ctx = revsymbol(unfilteredrepo, changeid)

        # If the changeset is obsolete, enrich the message with the reason
        # that made this changeset not visible
        if ctx.obsolete():
            msg = obsutil._getfilteredreason(repo, changeid, ctx)
        else:
            msg = _(b"hidden revision '%s'") % changeid

        hint = _(b'use --hidden to access hidden revisions')

        return error.FilteredRepoLookupError(msg, hint=hint)
    msg = _(b"filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)


def revsingle(repo, revspec, default=b'.', localalias=None):
    if not revspec and revspec != 0:
        return repo[default]

    l = revrange(repo, [revspec], localalias=localalias)
    if not l:
        raise error.Abort(_(b'empty revision set'))
    return repo[l.last()]


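# Illustrative sketch, not part of the original module: revsingle() evaluates
# a full revset and keeps its last member, so an empty spec falls back to
# `default`, here the working directory parent.
def _example_single_ctx(repo, spec=b''):
    return revsingle(repo, spec, default=b'.')

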
def _pairspec(revspec):
    tree = revsetlang.parse(revspec)
    return tree and tree[0] in (
        b'range',
        b'rangepre',
        b'rangepost',
        b'rangeall',
    )


def revpair(repo, revs):
    if not revs:
        return repo[b'.'], repo[None]

    l = revrange(repo, revs)

    if not l:
        raise error.Abort(_(b'empty revision range'))

    first = l.first()
    second = l.last()

    if (
        first == second
        and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)
    ):
        raise error.Abort(_(b'empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]


def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    allspecs = []
    for spec in specs:
        if isinstance(spec, int):
            spec = revsetlang.formatspec(b'%d', spec)
        allspecs.append(spec)
    return repo.anyrevs(allspecs, user=True, localalias=localalias)


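# Illustrative sketch, not part of the original module: as the docstring above
# requires, user-supplied values are escaped with revsetlang.formatspec()
# before being handed to revrange(). `branchname` is an assumption.
def _example_branch_revs(repo, branchname):
    spec = revsetlang.formatspec(b'branch(%s)', branchname)
    return revrange(repo, [spec])

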
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        return [parents[0], repo[nullrev]]
    if parents[0].rev() >= intrev(ctx) - 1:
        return []
    return parents


def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
780 780 """Return a function that produced paths for presenting to the user.

    The returned function takes a repo-relative path and produces a path
    that can be presented in the UI.

    Depending on the value of ui.relative-paths, either a repo-relative or
    cwd-relative path will be produced.

    legacyrelativevalue is the value to use if ui.relative-paths=legacy

    If forcerelativevalue is not None, then that value will be used regardless
    of what ui.relative-paths is set to.
    """
    if forcerelativevalue is not None:
        relative = forcerelativevalue
    else:
        config = repo.ui.config(b'ui', b'relative-paths')
        if config == b'legacy':
            relative = legacyrelativevalue
        else:
            relative = stringutil.parsebool(config)
            if relative is None:
                raise error.ConfigError(
                    _(b"ui.relative-paths is not a boolean ('%s')") % config
                )

    if relative:
        cwd = repo.getcwd()
        pathto = repo.pathto
        return lambda f: pathto(f, cwd)
    elif repo.ui.configbool(b'ui', b'slash'):
        return lambda f: f
    else:
        return util.localpath


def subdiruipathfn(subpath, uipathfn):
    '''Create a new uipathfn that treats the file as relative to subpath.'''
    return lambda f: uipathfn(posixpath.join(subpath, f))


def anypats(pats, opts):
    '''Checks if any patterns, including --include and --exclude were given.

    Some commands (e.g. addremove) use this condition for deciding whether to
    print absolute or relative paths.
    '''
    return bool(pats or opts.get(b'include') or opts.get(b'exclude'))


def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            try:
                globbed = glob.glob(pat)
            except re.error:
                globbed = [pat]
            if globbed:
                ret.extend(globbed)
                continue
        ret.append(kindpat)
    return ret


def matchandpats(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if opts is None:
        opts = {}
    if not globbed and default == b'relpath':
        pats = expandpats(pats or [])

    uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)

    def bad(f, msg):
        ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(
        pats,
        opts.get(b'include'),
        opts.get(b'exclude'),
        default,
        listsubrepos=opts.get(b'subrepos'),
        badfn=badfn,
    )

    if m.always():
        pats = []
    return m, pats


def match(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    '''Return a matcher that will warn about bad matches.'''
    return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]


def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always()


def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(files, badfn=badfn)


def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.
    """
    if not matchmod.patkind(pat):
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    else:
        ctx = repo[rev]
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
        files = [f for f in ctx if m(f)]
        if len(files) != 1:
            raise error.ParseError(msg)
        return files[0]


def getorigvfs(ui, repo):
    """return a vfs suitable to save 'orig' file

    return None if no special directory is configured"""
    origbackuppath = ui.config(b'ui', b'origbackuppath')
    if not origbackuppath:
        return None
    return vfs.vfs(repo.wvfs.join(origbackuppath))


def backuppath(ui, repo, filepath):
    '''customize where working copy backup files (.orig files) are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified

    filepath is repo-relative

    Returns an absolute path
    '''
    origvfs = getorigvfs(ui, repo)
    if origvfs is None:
        return repo.wjoin(filepath + b".orig")

    origbackupdir = origvfs.dirname(filepath)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        for f in reversed(list(pathutil.finddirs(filepath))):
            if origvfs.isfileorlink(f):
                ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    if origvfs.isdir(filepath) and not origvfs.islink(filepath):
        ui.note(
            _(b'removing conflicting directory: %s\n') % origvfs.join(filepath)
        )
        origvfs.rmtree(filepath, forcibly=True)

    return origvfs.join(filepath)


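# Illustrative sketch, not part of the original module: with the default
# config this returns <repo>/<filepath>.orig; with [ui] origbackuppath set,
# a path under that directory is returned after the conflict cleanup above.
# The repo-relative file path is an assumption.
def _example_origpath(ui, repo):
    return backuppath(ui, repo, b'dir/file.txt')

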
class _containsnode(object):
    """proxy __contains__(node) to container.__contains__ which accepts revs"""

    def __init__(self, repo, revcontainer):
        self._torev = repo.changelog.rev
        self._revcontains = revcontainer.__contains__

    def __contains__(self, node):
        return self._revcontains(self._torev(node))


def cleanupnodes(
    repo,
    replacements,
    operation,
    moves=None,
    metadata=None,
    fixphase=False,
    targetphase=None,
    backup=True,
):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is a dictionary containing metadata to be stored in obsmarkers if
    obsolescence is enabled.
    """
    assert fixphase or targetphase is None
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, b'items'):
        replacements = {(n,): () for n in replacements}
    else:
        # upgrade non-tuple "source" keys to tuples for backwards compatibility
        repls = {}
        for key, value in replacements.items():
            if not isinstance(key, tuple):
                key = (key,)
            repls[key] = value
        replacements = repls

    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    for oldnodes, newnodes in replacements.items():
        for oldnode in oldnodes:
            if oldnode in moves:
                continue
            if len(newnodes) > 1:
                # usually a split, take the one with biggest rev number
                newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
            elif len(newnodes) == 0:
                # move bookmark backwards
                allreplaced = []
                for rep in replacements:
                    allreplaced.extend(rep)
                roots = list(
                    unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
                )
                if roots:
                    newnode = roots[0].node()
                else:
                    newnode = nullid
            else:
                newnode = newnodes[0]
            moves[oldnode] = newnode

    allnewnodes = [n for ns in replacements.values() for n in ns]
    toretract = {}
    toadvance = {}
    if fixphase:
        precursors = {}
        for oldnodes, newnodes in replacements.items():
            for oldnode in oldnodes:
                for newnode in newnodes:
                    precursors.setdefault(newnode, []).append(oldnode)

        allnewnodes.sort(key=lambda n: unfi[n].rev())
        newphases = {}

        def phase(ctx):
            return newphases.get(ctx.node(), ctx.phase())

        for newnode in allnewnodes:
            ctx = unfi[newnode]
            parentphase = max(phase(p) for p in ctx.parents())
            if targetphase is None:
                oldphase = max(
                    unfi[oldnode].phase() for oldnode in precursors[newnode]
                )
                newphase = max(oldphase, parentphase)
            else:
                newphase = max(targetphase, parentphase)
            newphases[newnode] = newphase
            if newphase > ctx.phase():
                toretract.setdefault(newphase, []).append(newnode)
            elif newphase < ctx.phase():
                toadvance.setdefault(newphase, []).append(newnode)

    with repo.transaction(b'cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks  # avoid import cycle

            repo.ui.debug(
                b'moving bookmarks %r from %s to %s\n'
                % (
                    pycompat.rapply(pycompat.maybebytestr, oldbmarks),
                    hex(oldnode),
                    hex(newnode),
                )
            )
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs(
                b'parents(roots(%ln & (::%n))) - parents(%n)',
                allnewnodes,
                newnode,
                oldnode,
            )
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        for phase, nodes in toretract.items():
            phases.retractboundary(repo, tr, phase, nodes)
        for phase, nodes in toadvance.items():
            phases.advanceboundary(repo, tr, phase, nodes)

        mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obsolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the nodes in topological order, which might be useful
            # for some obsstore logic.
            # NOTE: the sorting might belong to createmarkers.
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0][0])
            rels = []
            for ns, s in sorted(replacements.items(), key=sortfunc):
                rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
                rels.append(rel)
            if rels:
                obsolete.createmarkers(
                    repo, rels, operation=operation, metadata=metadata
                )
        elif phases.supportinternal(repo) and mayusearchived:
            # this assumes we do not have "unstable" nodes above the cleaned ones
            allreplaced = set()
            for ns in replacements.keys():
                allreplaced.update(ns)
            if backup:
                from . import repair  # avoid import cycle

                node = min(allreplaced, key=repo.changelog.rev)
                repair.backupbundle(
                    repo, allreplaced, allreplaced, node, operation
                )
            phases.retractboundary(repo, tr, phases.archived, allreplaced)
        else:
            from . import repair  # avoid import cycle

            tostrip = list(n for ns in replacements for n in ns)
            if tostrip:
                repair.delayedstrip(
                    repo.ui, repo, tostrip, operation, backup=backup
                )


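# Illustrative sketch, not part of the original module: a rebase-like caller
# maps each old node (as a 1-tuple) to its successors; an empty value means
# "pruned, no successor", in which case bookmarks are moved backwards as
# computed above. The node arguments are assumptions.
def _example_cleanup(repo, oldnode, newnode, prunednode):
    replacements = {(oldnode,): (newnode,), (prunednode,): ()}
    cleanupnodes(repo, replacements, b'rebase', fixphase=True)

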
def addremove(repo, matcher, prefix, uipathfn, opts=None):
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get(b'dry_run')
    try:
        similarity = float(opts.get(b'similarity') or 0)
    except ValueError:
        raise error.Abort(_(b'similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.Abort(_(b'similarity must be between 0 and 100'))
    similarity /= 100.0

    ret = 0

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = subdiruipathfn(subpath, uipathfn)
            try:
                if sub.addremove(submatch, subprefix, subuipathfn, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )

    rejected = []

    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(
        repo, badmatch
    )

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _(b'adding %s\n') % uipathfn(abs)
                label = b'ui.addremove.added'
            else:
                status = _(b'removing %s\n') % uipathfn(abs)
                label = b'ui.addremove.removed'
            repo.ui.status(status, label=label)

    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret


def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. Files are relative
    to the repo root.'''
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _(b'adding %s\n') % abs
            else:
                status = _(b'removing %s\n') % abs
            repo.ui.status(status)

    # TODO: We should probably have the caller pass in uipathfn and apply it to
    # the messages above too. legacyrelativevalue=True is consistent with how
    # it used to work.
    uipathfn = getuipathfn(repo, legacyrelativevalue=True)
    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0


def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    matcher = repo.narrowmatch(matcher, includeexact=True)
    walkresults = dirstate.walk(
        matcher,
        subrepos=sorted(ctx.substate),
        unknown=True,
        ignored=False,
        full=False,
    )
    for abs, st in pycompat.iteritems(walkresults):
        dstate = dirstate[abs]
        if dstate == b'?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != b'r' and not st:
            deleted.append(abs)
        elif dstate == b'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == b'r' and not st:
            removed.append(abs)
        elif dstate == b'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten


def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(
            repo, added, removed, similarity
        ):
            if (
                repo.ui.verbose
                or not matcher.exact(old)
                or not matcher.exact(new)
            ):
                repo.ui.status(
                    _(
                        b'recording removal of %s as rename to %s '
                        b'(%d%% similar)\n'
                    )
                    % (uipathfn(old), uipathfn(new), score * 100)
                )
            renames[new] = old
    return renames


def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in pycompat.iteritems(renames):
            wctx.copy(old, new)


def getrenamedfn(repo, endrev=None):
    if copiesmod.usechangesetcentricalgo(repo):

        def getrenamed(fn, rev):
            ctx = repo[rev]
            p1copies = ctx.p1copies()
            if fn in p1copies:
                return p1copies[fn]
            p2copies = ctx.p2copies()
            if fn in p2copies:
                return p2copies[fn]
            return None

        return getrenamed

    rcache = {}
    if endrev is None:
        endrev = len(repo)

    def getrenamed(fn, rev):
        '''looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev.'''
        if fn not in rcache:
            rcache[fn] = {}
            fl = repo.file(fn)
            for i in fl:
                lr = fl.linkrev(i)
                renamed = fl.renamed(fl.node(i))
                rcache[fn][lr] = renamed and renamed[0]
                if lr >= endrev:
                    break
        if rev in rcache[fn]:
            return rcache[fn][rev]

        # If linkrev != rev (i.e. rev not found in rcache) fallback to
        # filectx logic.
        try:
            return repo[rev][fn].copysource()
        except error.LookupError:
            return None

    return getrenamed


def getcopiesfn(repo, endrev=None):
    if copiesmod.usechangesetcentricalgo(repo):

        def copiesfn(ctx):
            if ctx.p2copies():
                allcopies = ctx.p1copies().copy()
                # There should be no overlap
                allcopies.update(ctx.p2copies())
                return sorted(allcopies.items())
            else:
                return sorted(ctx.p1copies().items())

    else:
        getrenamed = getrenamedfn(repo, endrev)

        def copiesfn(ctx):
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename))
            return copies

    return copiesfn


def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc:  # copying back a copy?
        if repo.dirstate[dst] not in b'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == b'a' and origsrc == src:
            if not ui.quiet:
                ui.warn(
                    _(
                        b"%s has not been committed yet, so no copy "
                        b"data will be stored for %s.\n"
                    )
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
                )
            if repo.dirstate[dst] in b'?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)


def movedirstate(repo, newctx, match=None):
    """Move the dirstate to newctx and adjust it as necessary.

    A matcher can be provided as an optimization. It is probably a bug to pass
    a matcher that doesn't match all the differences between the parent of the
    working copy and newctx.
    """
    oldctx = repo[b'.']
    ds = repo.dirstate
    copies = dict(ds.copies())
    ds.setparents(newctx.node(), nullid)
    s = newctx.status(oldctx, match=match)
    for f in s.modified:
        if ds[f] == b'r':
            # modified + removed -> removed
            continue
        ds.normallookup(f)

    for f in s.added:
        if ds[f] == b'r':
            # added + removed -> unknown
            ds.drop(f)
        elif ds[f] != b'a':
            ds.add(f)

    for f in s.removed:
        if ds[f] == b'a':
            # removed + added -> normal
            ds.normallookup(f)
        elif ds[f] != b'r':
            ds.remove(f)

    # Merge old parent and old working dir copies
    oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
    oldcopies.update(copies)
    copies = dict(
        (dst, oldcopies.get(src, src))
        for dst, src in pycompat.iteritems(oldcopies)
    )
    # Adjust the dirstate copies
    for dst, src in pycompat.iteritems(copies):
        if src not in newctx or dst in newctx or ds[dst] != b'a':
            src = None
        ds.copy(src, dst)


def writerequires(opener, requirements):
    with opener(b'requires', b'w', atomictemp=True) as fp:
        for r in sorted(requirements):
            fp.write(b"%s\n" % r)


class filecachesubentry(object):
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

            # check again
            if not self._cacheable:
                return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise


class filecacheentry(object):
    def __init__(self, paths, stat=True):
        self._entries = []
        for path in paths:
            self._entries.append(filecachesubentry(path, stat))

    def changed(self):
        '''true if any entry has changed'''
        for entry in self._entries:
            if entry.changed():
                return True
        return False

    def refresh(self):
        for entry in self._entries:
            entry.refresh()


class filecache(object):
1552 1552 """A property like decorator that tracks files under .hg/ for updates.

    On first access, the files defined as arguments are stat()ed and the
    results cached. The decorated function is called. The results are stashed
    away in a ``_filecache`` dict on the object whose method is decorated.

    On subsequent access, the cached result is used as it is set to the
    instance dictionary.

    On external property set/delete operations, the caller must update the
    corresponding _filecache entry appropriately. Use __class__.<attr>.set()
    instead of directly setting <attr>.

    When using the property API, the cached data is always used if available.
    No stat() is performed to check if the file has changed.

    Others can muck about with the state of the ``_filecache`` dict. e.g. they
    can populate an entry before the property's getter is called. In this case,
    entries in ``_filecache`` will be used during property operations,
    if available. If the underlying file changes, it is up to external callers
    to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
    method result as well as possibly calling ``del obj._filecache[attr]`` to
    remove the ``filecacheentry``.
    """

    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class whose member function was decorated).
1586 1586 """
1587 1587 raise NotImplementedError
1588 1588
1589 1589 def __call__(self, func):
1590 1590 self.func = func
1591 1591 self.sname = func.__name__
1592 1592 self.name = pycompat.sysbytes(self.sname)
1593 1593 return self
1594 1594
1595 1595 def __get__(self, obj, type=None):
1596 1596 # if accessed on the class, return the descriptor itself.
1597 1597 if obj is None:
1598 1598 return self
1599 1599
1600 1600 assert self.sname not in obj.__dict__
1601 1601
1602 1602 entry = obj._filecache.get(self.name)
1603 1603
1604 1604 if entry:
1605 1605 if entry.changed():
1606 1606 entry.obj = self.func(obj)
1607 1607 else:
1608 1608 paths = [self.join(obj, path) for path in self.paths]
1609 1609
            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified the file between the time we read and stat it
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.sname] = entry.obj
        return entry.obj

    # don't implement __set__(), which would make __dict__ lookup as slow as
    # function call.

    def set(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value  # update cached copy
        obj.__dict__[self.sname] = value  # update copy returned by obj.x


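# Illustrative sketch, not part of the original module: as the docstring above
# requires, users subclass filecache to resolve paths; localrepo does
# essentially this with its vfs. The `obj.vfs` attribute is an assumption
# about the decorated object.
class _examplerepofilecache(filecache):
    """filecache for files in .hg/, resolved through the object's vfs"""

    def join(self, obj, fname):
        return obj.vfs.join(fname)

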
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config(b"extdata", source)
    if not spec:
        raise error.Abort(_(b"unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith(b"shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(
                procutil.tonativestr(cmd),
                shell=True,
                bufsize=-1,
                close_fds=procutil.closefds,
                stdout=subprocess.PIPE,
                cwd=procutil.tonativestr(repo.root),
            )
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            if b" " in l:
                k, v = l.strip().split(b" ", 1)
            else:
                k, v = l.strip(), b""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass  # we ignore data for nodes that don't exist locally
    finally:
        if proc:
            try:
                proc.communicate()
            except ValueError:
                # This happens if we started iterating src and then
                # get a parse error on a line. It should be safe to ignore.
                pass
        if src:
            src.close()
    if proc and proc.returncode != 0:
        raise error.Abort(
            _(b"extdata command '%s' failed: %s")
            % (cmd, procutil.explainexit(proc.returncode))
        )

    return data


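# Hedged configuration example for extdatasource (the source name and the
# script are hypothetical; the shell command runs with cwd=repo.root):
#
#   [extdata]
#   bugs = shell:./scripts/list-bug-revs.sh
#
#   data = extdatasource(repo, b'bugs')
#   # -> {rev: b'freeform value', ...} for revisions known locally
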
def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    if lock is None:
        raise error.LockInheritanceContractViolation(
            b'lock can only be inherited while held'
        )
    if environ is None:
        environ = {}
    with lock.inherit() as locker:
        environ[envvar] = locker
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)


def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    return _locksub(
        repo, repo.currentwlock(), b'HG_WLOCK_LOCKER', cmd, *args, **kwargs
    )


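# Hedged usage sketch for wlocksub (the command is hypothetical): the child
# process sees HG_WLOCK_LOCKER in its environment, which is what lets a
# nested hg invocation reuse the already-held working-directory lock:
#
#   with repo.wlock():
#       rc = wlocksub(repo, b'hg update tip')
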
class progress(object):
    def __init__(self, ui, updatebar, topic, unit=b"", total=None):
        self.ui = ui
        self.pos = 0
        self.topic = topic
        self.unit = unit
        self.total = total
        self.debug = ui.configbool(b'progress', b'debug')
        self._updatebar = updatebar

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.complete()

    def update(self, pos, item=b"", total=None):
        assert pos is not None
        if total:
            self.total = total
        self.pos = pos
        self._updatebar(self.topic, self.pos, item, self.unit, self.total)
        if self.debug:
            self._printdebug(item)

    def increment(self, step=1, item=b"", total=None):
        self.update(self.pos + step, item, total)

    def complete(self):
        self.pos = None
        self.unit = b""
        self.total = None
        self._updatebar(self.topic, self.pos, b"", self.unit, self.total)

    def _printdebug(self, item):
        unit = b''
        if self.unit:
            unit = b' ' + self.unit
        if item:
            item = b' ' + item

        if self.total:
            pct = 100.0 * self.pos / self.total
            self.ui.debug(
                b'%s:%s %d/%d%s (%4.2f%%)\n'
                % (self.topic, item, self.pos, self.total, unit, pct)
            )
        else:
            self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit))


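# Hedged usage sketch: callers normally obtain one of these objects through
# ui.makeprogress() rather than constructing it directly; as a context
# manager it calls complete() on exit ('files' is a hypothetical iterable):
#
#   with repo.ui.makeprogress(b'scanning', unit=b'files',
#                             total=len(files)) as prog:
#       for f in files:
#           prog.increment(item=f)
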
def gdinitconfig(ui):
    """helper function to know whether a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    return ui.configbool(b'format', b'generaldelta') or ui.configbool(
        b'format', b'usegeneraldelta'
    )


def gddeltaconfig(ui):
    """helper function to know whether incoming deltas should be optimised
    """
    # experimental config: format.generaldelta
    return ui.configbool(b'format', b'generaldelta')


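# Hedged hgrc example: setting the experimental knob below makes both
# helpers return True, so new repositories use the general delta format:
#
#   [format]
#   generaldelta = True
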
class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter; values must not
    contain '\n' characters"""

    firstlinekey = b'__firstline'

    def __init__(self, vfs, path, keys=None):
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of file should
        be treated as a key-value pair or returned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                e = _(b"empty simplekeyvalue file")
                raise error.CorruptedState(e)
            # we don't want to include '\n' in the __firstline
            d[self.firstlinekey] = lines[0][:-1]
            del lines[0]

        try:
            # the 'if line.strip()' part prevents us from failing on empty
            # lines, which contain only '\n' and therefore are not skipped
            # by 'if line'
            updatedict = dict(
                line[:-1].split(b'=', 1) for line in lines if line.strip()
            )
            if self.firstlinekey in updatedict:
                e = _(b"%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            d.update(updatedict)
        except ValueError as e:
            raise error.CorruptedState(stringutil.forcebytestr(e))
        return d

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file
        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to the file before
        everything else, as it is, not in a key=value form"""
        lines = []
        if firstline is not None:
            lines.append(b'%s\n' % firstline)

        for k, v in data.items():
            if k == self.firstlinekey:
                e = b"key name '%s' is reserved" % self.firstlinekey
                raise error.ProgrammingError(e)
            if not k[0:1].isalpha():
                e = b"keys must start with a letter in a key-value file"
                raise error.ProgrammingError(e)
            if not k.isalnum():
                e = b"invalid key name in a simple key-value file"
                raise error.ProgrammingError(e)
            if b'\n' in v:
                e = b"invalid value in a simple key-value file"
                raise error.ProgrammingError(e)
            lines.append(b"%s=%s\n" % (k, v))
        with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp:
            fp.write(b''.join(lines))


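# Hedged round-trip sketch for simplekeyvaluefile (the file name b'mystate'
# is hypothetical; the shelve extension is one real consumer):
#
#   f = simplekeyvaluefile(repo.vfs, b'mystate')
#   f.write({b'version': b'1'}, firstline=b'header text')
#   d = f.read(firstlinenonkeyval=True)
#   # d == {b'__firstline': b'header text', b'version': b'1'}
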
_reportobsoletedsource = [
    b'debugobsolete',
    b'pull',
    b'push',
    b'serve',
    b'unbundle',
]

_reportnewcssource = [
    b'pull',
    b'unbundle',
]


def prefetchfiles(repo, revs, match):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally, before the command
    uses them."""
    if match:
        # The command itself will complain about files that don't exist, so
        # don't duplicate the message.
        match = matchmod.badmatch(match, lambda fn, msg: None)
    else:
        match = matchall(repo)

    fileprefetchhooks(repo, revs, match)


# a list of (repo, revs, match) prefetch functions
fileprefetchhooks = util.hooks()

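# Hedged extension sketch: an extension can register a prefetch function so
# file contents are fetched before a command reads them ('_prefetch' is a
# hypothetical callback; util.hooks.add() takes a source name and a hook):
#
#   def _prefetch(repo, revs, match):
#       ...  # e.g. batch-download the needed file revisions
#
#   fileprefetchhooks.add(b'myext', _prefetch)
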
# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True


def registersummarycallback(repo, otr, txnname=b''):
    """register a callback to issue a summary after the transaction is closed
    """

    def txmatch(sources):
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used, leading to trouble. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())

        def wrapped(tr):
            repo = reporef()
            if filtername:
                assert repo is not None  # help pytype
                repo = repo.filtered(filtername)
            func(repo, tr)

        newcat = b'%02i-txnreport' % len(categories)
        otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    @reportsummary
    def reportchangegroup(repo, tr):
        cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
        cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
        cgfiles = tr.changes.get(b'changegroup-count-files', 0)
        cgheads = tr.changes.get(b'changegroup-count-heads', 0)
        if cgchangesets or cgrevisions or cgfiles:
            htext = b""
            if cgheads:
                htext = _(b" (%+d heads)") % cgheads
            msg = _(b"added %d changesets with %d changes to %d files%s\n")
            assert repo is not None  # help pytype
            repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))

    if txmatch(_reportobsoletedsource):

        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            newmarkers = len(tr.changes.get(b'obsmarkers', ()))
            if newmarkers:
                repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
            if obsoleted:
                repo.ui.status(_(b'obsoleted %i changesets\n') % len(obsoleted))

    if obsolete.isenabled(
        repo, obsolete.createmarkersopt
    ) and repo.ui.configbool(
        b'experimental', b'evolution.report-instabilities'
    ):
        instabilitytypes = [
            (b'orphan', b'orphan'),
            (b'phase-divergent', b'phasedivergent'),
            (b'content-divergent', b'contentdivergent'),
        ]

        def getinstabilitycounts(repo):
            filtered = repo.changelog.filteredrevs
            counts = {}
            for instability, revset in instabilitytypes:
                counts[instability] = len(
                    set(obsolete.getrevs(repo, revset)) - filtered
                )
            return counts

        oldinstabilitycounts = getinstabilitycounts(repo)

        @reportsummary
        def reportnewinstabilities(repo, tr):
            newinstabilitycounts = getinstabilitycounts(repo)
            for instability, revset in instabilitytypes:
                delta = (
                    newinstabilitycounts[instability]
                    - oldinstabilitycounts[instability]
                )
                msg = getinstabilitymessage(delta, instability)
                if msg:
                    repo.ui.warn(msg)

    if txmatch(_reportnewcssource):

        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            unfi = repo.unfiltered()
            if origrepolen >= len(unfi):
                return

            # Compute the bounds of new visible revisions' range.
            revs = smartset.spanset(repo, start=origrepolen)
            if revs:
                minrev, maxrev = repo[revs.min()], repo[revs.max()]

                if minrev == maxrev:
                    revrange = minrev
                else:
                    revrange = b'%s:%s' % (minrev, maxrev)
                draft = len(repo.revs(b'%ld and draft()', revs))
                secret = len(repo.revs(b'%ld and secret()', revs))
                if not (draft or secret):
                    msg = _(b'new changesets %s\n') % revrange
                elif draft and secret:
                    msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
                    msg %= (revrange, draft, secret)
                elif draft:
                    msg = _(b'new changesets %s (%d drafts)\n')
                    msg %= (revrange, draft)
                elif secret:
                    msg = _(b'new changesets %s (%d secrets)\n')
                    msg %= (revrange, secret)
                else:
                    errormsg = b'entered unreachable condition'
                    raise error.ProgrammingError(errormsg)
                repo.ui.status(msg)

            # search new changesets directly pulled as obsolete
            duplicates = tr.changes.get(b'revduplicates', ())
            obsadded = unfi.revs(
                b'(%d: + %ld) and obsolete()', origrepolen, duplicates
            )
            cl = repo.changelog
            extinctadded = [r for r in obsadded if r not in cl]
            if extinctadded:
                # They are not just obsolete, but obsolete and invisible; we
                # call them "extinct" internally, but the term has not been
                # exposed to users.
                msg = b'(%d other changesets obsolete on arrival)\n'
                repo.ui.status(msg % len(extinctadded))

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            pull/unbundle.
            """
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            phasetracking = tr.changes.get(b'phases', {})
            if not phasetracking:
                return
            published = [
                rev
                for rev, (old, new) in pycompat.iteritems(phasetracking)
                if new == phases.public and rev < origrepolen
            ]
            if not published:
                return
            repo.ui.status(
                _(b'%d local changesets published\n') % len(published)
            )


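# Hedged usage sketch: transaction creation (see localrepo.transaction) is
# what normally registers these callbacks; schematically:
#
#   tr = repo.transaction(b'pull')
#   registersummarycallback(repo, tr, txnname=b'pull')
#   # ... apply the incoming changes ...
#   tr.close()  # post-close hooks fire and print the summary lines
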
def getinstabilitymessage(delta, instability):
    """function to return the message to show warning about new instabilities

    exists as a separate function so that extensions can wrap it to show more
    information, like how to fix instabilities"""
    if delta > 0:
        return _(b'%i new %s changesets\n') % (delta, instability)


def nodesummaries(repo, nodes, maxnumnodes=4):
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return b' '.join(short(h) for h in nodes)
    first = b' '.join(short(h) for h in nodes[:maxnumnodes])
    return _(b"%s and %d others") % (first, len(nodes) - maxnumnodes)


def enforcesinglehead(repo, tr, desc, accountclosed=False):
    """check that no named branch has multiple heads"""
    if desc in (b'strip', b'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered(b'visible')
    # possible improvement: we could restrict the check to the affected
    # branches
    bm = visible.branchmap()
    for name in bm:
        heads = bm.branchheads(name, closed=accountclosed)
        if len(heads) > 1:
            msg = _(b'rejecting multiple heads on branch "%s"')
            msg %= name
            hint = _(b'%d heads: %s')
            hint %= (len(heads), nodesummaries(repo, heads))
            raise error.Abort(msg, hint=hint)


def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.
    """
    return sink


def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    if not repo.filtername or not repo.ui.configbool(
        b'experimental', b'directaccess'
    ):
        return repo

    if repo.filtername not in (b'visible', b'visible-hidden'):
        return repo

    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError:  # will be reported by scmutil.revrange()
            continue

        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)

    if not revs:
        return repo

    if hiddentype == b'warn':
        unfi = repo.unfiltered()
        revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in revs])
        repo.ui.warn(
            _(
                b"warning: accessing hidden changesets for write "
                b"operation: %s\n"
            )
            % revstr
        )

    # we have to use the new filtername to separate branch/tags caches until
    # we can disable these caches when revisions are dynamically pinned.
    return repo.filtered(b'visible-hidden', revs)


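# Hedged example: with the experimental directaccess feature enabled,
# commands can operate on hidden changesets named explicitly by hash
# (b'ffa21a9a' is a hypothetical hidden-changeset prefix):
#
#   [experimental]
#   directaccess = True
#
#   repo = unhidehashlikerevs(repo, [b'ffa21a9a'], b'warn')
#   # -> a 'visible-hidden' filtered repo with that changeset pinned
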
def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and return a set of revision numbers of the
    hidden changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
    for s in symbols:
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    if n not in cl:
                        revs.add(n)
                    continue
        except ValueError:
            pass

        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs


def bookmarkrevs(repo, mark):
    """
    Select revisions reachable by a given bookmark
    """
    return repo.revs(
        b"ancestors(bookmark(%s)) - "
        b"ancestors(head() and not bookmark(%s)) - "
        b"ancestors(bookmark() and not bookmark(%s))",
        mark,
        mark,
        mark,
    )