##// END OF EJS Templates
status: fix default value of status struct...
Yuya Nishihara -
r44214:b5f183ee default
parent child Browse files
Show More
@@ -1,2197 +1,2197 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import hashlib
13 13 import os
14 14 import posixpath
15 15 import re
16 16 import subprocess
17 17 import weakref
18 18
19 19 from .i18n import _
20 20 from .node import (
21 21 bin,
22 22 hex,
23 23 nullid,
24 24 nullrev,
25 25 short,
26 26 wdirid,
27 27 wdirrev,
28 28 )
29 29 from .pycompat import getattr
30 30 from .thirdparty import attr
31 31 from . import (
32 32 copies as copiesmod,
33 33 encoding,
34 34 error,
35 35 match as matchmod,
36 36 obsolete,
37 37 obsutil,
38 38 pathutil,
39 39 phases,
40 40 policy,
41 41 pycompat,
42 42 revsetlang,
43 43 similar,
44 44 smartset,
45 45 url,
46 46 util,
47 47 vfs,
48 48 )
49 49
50 50 from .utils import (
51 51 procutil,
52 52 stringutil,
53 53 )
54 54
55 55 if pycompat.iswindows:
56 56 from . import scmwindows as scmplatform
57 57 else:
58 58 from . import scmposix as scmplatform
59 59
60 60 parsers = policy.importmod('parsers')
61 61
62 62 termsize = scmplatform.termsize
63 63
64 64
@attr.s(slots=True, repr=False)
class status(object):
    '''Struct with a list of files per status.

    The 'deleted', 'unknown' and 'ignored' properties are only
    relevant to the working copy.
    '''

    # Each field must default to a *fresh* list per instance via
    # attr.Factory(list); a plain `default=list` would store the list
    # *type* itself as the default value instead of an empty list.
    modified = attr.ib(default=attr.Factory(list))
    added = attr.ib(default=attr.Factory(list))
    removed = attr.ib(default=attr.Factory(list))
    deleted = attr.ib(default=attr.Factory(list))
    unknown = attr.ib(default=attr.Factory(list))
    ignored = attr.ib(default=attr.Factory(list))
    clean = attr.ib(default=attr.Factory(list))

    def __iter__(self):
        # Iteration order matches the positional argument order above so
        # callers can unpack: m, a, r, d, u, i, c = status(...)
        yield self.modified
        yield self.added
        yield self.removed
        yield self.deleted
        yield self.unknown
        yield self.ignored
        yield self.clean

    def __repr__(self):
        return (
            r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
            r'unknown=%s, ignored=%s, clean=%s>'
        ) % tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)
95 95
96 96
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Build a (subpath, ctx) mapping, preferring subpaths from ctx1.
    # Subpaths from ctx2 matter when the .hgsub file has been modified
    # (in ctx2) but not yet committed (in ctx1).
    bysubpath = dict.fromkeys(ctx2.substate, ctx2)
    bysubpath.update(dict.fromkeys(ctx1.substate, ctx1))

    onlyinctx2 = set()
    for p in ctx2.substate:
        if p not in ctx1.substate:
            onlyinctx2.add(p)
            del bysubpath[p]

    for p, ctx in sorted(pycompat.iteritems(bysubpath)):
        yield p, ctx.sub(p)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2, so
    # that status/diff give accurate results for 'sub.{status|diff}(rev2)'
    # instead of comparing the ctx2 subrepo against itself.
    for p in onlyinctx2:
        yield p, ctx2.nullsub(p, ctx1)
121 121
122 122
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    # Count excluded nodes that are secret and not extinct; those are
    # worth mentioning to the user.
    secrets = []
    for n in excluded or ():
        ctx = repo[n]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secrets.append(n)

    if not secrets:
        ui.status(_(b"no changes found\n"))
    else:
        ui.status(
            _(b"no changes found (ignored %d secret changesets)\n")
            % len(secrets)
        )
141 141
142 142
def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.

    Handled errors return -1, except InterventionRequired (returns 1) and
    SystemExit (returns its original exit code).
    """
    try:
        try:
            return func()
        except: # re-raises
            # Print the traceback (when enabled) before the outer handlers
            # collapse the exception into a user-facing message.
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        if inst.errno == errno.ETIMEDOUT:
            reason = _(b'timed out waiting for lock held by %r') % (
                pycompat.bytestr(inst.locker)
            )
        else:
            reason = _(b'lock held by %r') % inst.locker
        ui.error(
            _(b"abort: %s: %s\n")
            % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
        )
        if not inst.locker:
            ui.error(_(b"(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        ui.error(
            _(b"abort: could not lock %s: %s\n")
            % (
                inst.desc or stringutil.forcebytestr(inst.filename),
                encoding.strtolocal(inst.strerror),
            )
        )
    except error.OutOfBandError as inst:
        if inst.args:
            msg = _(b"abort: remote error:\n")
        else:
            msg = _(b"abort: remote error\n")
        ui.error(msg)
        if inst.args:
            ui.error(b''.join(inst.args))
        if inst.hint:
            ui.error(b'(%s)\n' % inst.hint)
    except error.RepoError as inst:
        ui.error(_(b"abort: %s!\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.error(_(b"abort: %s") % inst.args[0])
        msg = inst.args[1]
        # The payload may be unicode, bytes, or anything a remote sent;
        # normalize cautiously before printing.
        if isinstance(msg, type(u'')):
            msg = pycompat.sysbytes(msg)
        if not isinstance(msg, bytes):
            ui.error(b" %r\n" % (msg,))
        elif not msg:
            ui.error(_(b" empty string\n"))
        else:
            ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
    except error.CensoredNodeError as inst:
        ui.error(_(b"abort: file censored %s!\n") % inst)
    except error.StorageError as inst:
        ui.error(_(b"abort: %s!\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
    except error.InterventionRequired as inst:
        ui.error(b"%s\n" % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
        # The user must act (e.g. resolve conflicts); exit code 1, not -1.
        return 1
    except error.WdirUnsupported:
        ui.error(_(b"abort: working directory revision cannot be specified\n"))
    except error.Abort as inst:
        ui.error(_(b"abort: %s\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
    except ImportError as inst:
        ui.error(_(b"abort: %s!\n") % stringutil.forcebytestr(inst))
        m = stringutil.forcebytestr(inst).split()[-1]
        if m in b"mpatch bdiff".split():
            ui.error(_(b"(did you forget to compile extensions?)\n"))
        elif m in b"zlib".split():
            ui.error(_(b"(is your Python install correct?)\n"))
    except (IOError, OSError) as inst:
        if util.safehasattr(inst, b"code"): # HTTPError
            ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
        elif util.safehasattr(inst, b"reason"): # URLError or SSLError
            try: # usually it is in the form (errno, strerror)
                reason = inst.reason.args[1]
            except (AttributeError, IndexError):
                # it might be anything, for example a string
                reason = inst.reason
            if isinstance(reason, pycompat.unicode):
                # SSLError of Python 2.7.9 contains a unicode
                reason = encoding.unitolocal(reason)
            ui.error(_(b"abort: error: %s\n") % reason)
        elif (
            util.safehasattr(inst, b"args")
            and inst.args
            and inst.args[0] == errno.EPIPE
        ):
            # Broken pipe (e.g. output piped to a pager that quit) is not
            # worth reporting.
            pass
        elif getattr(inst, "strerror", None): # common IOError or OSError
            if getattr(inst, "filename", None) is not None:
                ui.error(
                    _(b"abort: %s: '%s'\n")
                    % (
                        encoding.strtolocal(inst.strerror),
                        stringutil.forcebytestr(inst.filename),
                    )
                )
            else:
                ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
        else: # suspicious IOError
            raise
    except MemoryError:
        ui.error(_(b"abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case catch this and pass exit code to caller.
        return inst.code

    return -1
267 267
268 268
def checknewlabel(repo, lbl, kind):
    """Abort if `lbl` is not an acceptable new label (bookmark/branch/tag).

    Do not use the "kind" parameter in ui output; it makes strings
    difficult to translate.
    """
    if lbl in (b'tip', b'.', b'null'):
        raise error.Abort(_(b"the name '%s' is reserved") % lbl)
    for forbidden in (b':', b'\0', b'\n', b'\r'):
        if forbidden in lbl:
            raise error.Abort(
                _(b"%r cannot be used in a name") % pycompat.bytestr(forbidden)
            )
    try:
        int(lbl)
    except ValueError:
        pass
    else:
        # A purely numeric name would be ambiguous with revision numbers.
        raise error.Abort(_(b"cannot use an integer as a name"))
    if lbl != lbl.strip():
        raise error.Abort(_(b"leading or trailing whitespace in name %r") % lbl)
286 286
287 287
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    # Newlines and carriage returns are never allowed in tracked names.
    if b'\n' in f or b'\r' in f:
        raise error.Abort(
            _(b"'\\n' and '\\r' disallowed in filenames: %r")
            % pycompat.bytestr(f)
        )
295 295
296 296
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = b"%s: %s" % (msg, procutil.shellquote(f))
    if abort:
        raise error.Abort(msg)
    ui.warn(_(b"warning: %s\n") % msg)
308 308
309 309
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    raw = ui.config(b'ui', b'portablefilenames')
    lowered = raw.lower()
    parsed = stringutil.parsebool(raw)
    # On Windows non-portable names cannot exist at all, so always abort.
    abort = pycompat.iswindows or lowered == b'abort'
    warn = parsed or lowered == b'warn'
    recognized = warn or abort or lowered == b'ignore'
    if parsed is None and not recognized:
        raise error.ConfigError(
            _(b"ui.portablefilenames value is invalid ('%s')") % raw
        )
    return abort, warn
323 323
324 324
class casecollisionauditor(object):
    '''Warn or abort when a new filename would case-fold collide with a
    tracked one.'''

    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        self._dirstate = dirstate
        joined = b'\0'.join(dirstate)
        self._loweredfiles = set(encoding.lower(joined).split(b'\0'))
        # Remember names already audited so calling this object twice
        # with the same filename does not report a bogus collision.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _(b'possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_(b"warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)
348 348
349 349
350 350 def filteredhash(repo, maxrev):
351 351 """build hash of filtered revisions in the current repoview.
352 352
353 353 Multiple caches perform up-to-date validation by checking that the
354 354 tiprev and tipnode stored in the cache file match the current repository.
355 355 However, this is not sufficient for validating repoviews because the set
356 356 of revisions in the view may change without the repository tiprev and
357 357 tipnode changing.
358 358
359 359 This function hashes all the revs filtered from the view and returns
360 360 that SHA-1 digest.
361 361 """
362 362 cl = repo.changelog
363 363 if not cl.filteredrevs:
364 364 return None
365 365 key = None
366 366 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
367 367 if revs:
368 368 s = hashlib.sha1()
369 369 for rev in revs:
370 370 s.update(b'%d;' % rev)
371 371 key = s.digest()
372 372 return key
373 373
374 374
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''

    def errhandler(err):
        # Only propagate errors hit on the root path itself; failures
        # while descending into subdirectories are silently skipped.
        if err.filename == path:
            raise err

    # Without os.path.samestat we cannot detect symlink cycles, so
    # symlink following is disabled on platforms lacking it.
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:

        def adddir(dirlst, dirname):
            # Record dirname's stat in dirlst; return False when an
            # equivalent directory (same stat) was already seen.
            dirstat = os.stat(dirname)
            match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
            if not match:
                dirlst.append(dirstat)
            return not match

    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if b'.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, b'.hg', b'patches')
            if os.path.isdir(os.path.join(qroot, b'.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove(b'.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # Recurse through the link target, sharing
                        # seen_dirs so cycles are broken.
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
422 422
423 423
def binnode(ctx):
    """Return binary node id for a given basectx"""
    # The working directory has no real node; stand in with wdirid.
    n = ctx.node()
    return wdirid if n is None else n
430 430
431 431
def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    # The working directory has no rev; stand in with wdirrev.
    r = ctx.rev()
    return wdirrev if r is None else r
439 439
440 440
def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    ui = ctx.repo().ui
    return formatrevnode(ui, intrev(ctx), binnode(ctx))
446 446
447 447
def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    # Debug mode shows the full hash; otherwise abbreviate it.
    hexfunc = hex if ui.debugflag else short
    return b'%d:%s' % (rev, hexfunc(node))
455 455
456 456
def resolvehexnodeidprefix(repo, prefix):
    """Resolve a (possibly abbreviated) hex nodeid prefix to a binary node.

    Returns None when nothing matches. Re-raises
    AmbiguousPrefixLookupError when several nodes match and the configured
    disambiguation revset (if any) does not narrow it to one.
    """
    if prefix.startswith(b'x') and repo.ui.configbool(
        b'experimental', b'revisions.prefixhexnode'
    ):
        # A leading 'x' explicitly marks the remainder as a hex nodeid
        # (as opposed to a revision number).
        prefix = prefix[1:]
    try:
        # Uses unfiltered repo because it's faster when prefix is ambiguous.
        # This matches the shortesthexnodeidprefix() function below.
        node = repo.unfiltered().changelog._partialmatch(prefix)
    except error.AmbiguousPrefixLookupError:
        revset = repo.ui.config(
            b'experimental', b'revisions.disambiguatewithin'
        )
        if revset:
            # Clear config to avoid infinite recursion
            configoverrides = {
                (b'experimental', b'revisions.disambiguatewithin'): None
            }
            with repo.ui.configoverride(configoverrides):
                revs = repo.anyrevs([revset], user=True)
                matches = []
                for rev in revs:
                    node = repo.changelog.node(rev)
                    if hex(node).startswith(prefix):
                        matches.append(node)
                # A single candidate within the revset resolves the
                # ambiguity; otherwise fall through and re-raise.
                if len(matches) == 1:
                    return matches[0]
        raise
    if node is None:
        return
    repo.changelog.rev(node) # make sure node isn't filtered
    return node
489 489
490 490
def mayberevnum(repo, prefix):
    """Checks if the given prefix may be mistaken for a revision number"""
    try:
        value = int(prefix)
    except ValueError:
        return False
    # A leading zero (except for '0' itself, which *is* a valid revnum)
    # can never be a revision number, nor can anything >= the repo size.
    if prefix != b'0' and prefix[0:1] == b'0':
        return False
    return value < len(repo)
504 504
505 505
def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
    """Find the shortest unambiguous prefix that matches hexnode.

    If "cache" is not None, it must be a dictionary that can be used for
    caching between calls to this method.
    """
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. so we look for hash collision in
    # unfiltered space, which means some hashes may be slightly longer.

    minlength = max(minlength, 1)

    def disambiguate(prefix):
        """Disambiguate against revnums."""
        if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'):
            # Mark a revnum-looking prefix with a leading 'x' instead of
            # lengthening it.
            if mayberevnum(repo, prefix):
                return b'x' + prefix
            else:
                return prefix

        # Otherwise, grow the prefix until it can no longer be mistaken
        # for a revision number.
        hexnode = hex(node)
        for length in range(len(prefix), len(hexnode) + 1):
            prefix = hexnode[:length]
            if not mayberevnum(repo, prefix):
                return prefix

    cl = repo.unfiltered().changelog
    revset = repo.ui.config(b'experimental', b'revisions.disambiguatewithin')
    if revset:
        revs = None
        if cache is not None:
            revs = cache.get(b'disambiguationrevset')
        if revs is None:
            revs = repo.anyrevs([revset], user=True)
            if cache is not None:
                cache[b'disambiguationrevset'] = revs
        if cl.rev(node) in revs:
            hexnode = hex(node)
            nodetree = None
            if cache is not None:
                nodetree = cache.get(b'disambiguationnodetree')
            if not nodetree:
                try:
                    nodetree = parsers.nodetree(cl.index, len(revs))
                except AttributeError:
                    # no native nodetree
                    pass
                else:
                    for r in revs:
                        nodetree.insert(r)
                    if cache is not None:
                        cache[b'disambiguationnodetree'] = nodetree
            if nodetree is not None:
                # The native nodetree answers shortest-prefix queries
                # directly.
                length = max(nodetree.shortest(node), minlength)
                prefix = hexnode[:length]
                return disambiguate(prefix)
            # Pure-Python fallback: grow the prefix until it matches a
            # single node within the disambiguation revset.
            for length in range(minlength, len(hexnode) + 1):
                matches = []
                prefix = hexnode[:length]
                for rev in revs:
                    otherhexnode = repo[rev].hex()
                    if prefix == otherhexnode[:length]:
                        matches.append(otherhexnode)
                if len(matches) == 1:
                    return disambiguate(prefix)

    try:
        return disambiguate(cl.shortest(node, minlength))
    except error.LookupError:
        raise error.RepoLookupError()
576 576
577 577
def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
    symbol is an ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
    except error.RepoLookupError:
        return False
    return True
589 589
590 590
def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        msg = (
            b"symbol (%s of type %s) was not a string, did you mean "
            b"repo[symbol]?" % (symbol, type(symbol))
        )
        raise error.ProgrammingError(msg)
    try:
        if symbol in (b'.', b'tip', b'null'):
            return repo[symbol]

        try:
            # Try as a revision number; negative values count back from
            # the tip.
            r = int(symbol)
            if b'%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        if len(symbol) == 40:
            try:
                # Try as a full 40-character hex nodeid.
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        # Finally, try as an abbreviated hex nodeid prefix.
        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_(b"unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        return repo[None]
    except (
        error.FilteredIndexError,
        error.FilteredLookupError,
        error.FilteredRepoLookupError,
    ):
        # Replace with an error explaining why the revision is hidden.
        raise _filterederror(repo, symbol)
656 656
657 657
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if not repo.filtername.startswith(b'visible'):
        msg = _(b"filtered revision '%s' (not in '%s' subset)")
        msg %= (changeid, repo.filtername)
        return error.FilteredRepoLookupError(msg)

    # The changeset is hidden: check whether it is obsolete, and if so
    # enrich the message with the reason it is not visible.
    ctx = revsymbol(repo.unfiltered(), changeid)
    if ctx.obsolete():
        msg = obsutil._getfilteredreason(repo, changeid, ctx)
    else:
        msg = _(b"hidden revision '%s'") % changeid

    hint = _(b'use --hidden to access hidden revisions')
    return error.FilteredRepoLookupError(msg, hint=hint)
682 682
683 683
def revsingle(repo, revspec, default=b'.', localalias=None):
    """Resolve a single revision spec, falling back to `default` if empty.

    Note that 0 is a valid revspec and must not trigger the fallback."""
    if not revspec and revspec != 0:
        return repo[default]

    matched = revrange(repo, [revspec], localalias=localalias)
    if not matched:
        raise error.Abort(_(b'empty revision set'))
    return repo[matched.last()]
692 692
693 693
def _pairspec(revspec):
    # True when the top-level revset operator is one of the range forms,
    # in which case the result must always be treated as a pair.
    rangeops = (b'range', b'rangepre', b'rangepost', b'rangeall')
    tree = revsetlang.parse(revspec)
    return tree and tree[0] in rangeops
702 702
703 703
def revpair(repo, revs):
    """Resolve revs into a (firstctx, secondctx) pair of contexts."""
    if not revs:
        # No revs given: working copy against its parent.
        return repo[b'.'], repo[None]

    resolved = revrange(repo, revs)

    if not resolved:
        raise error.Abort(_(b'empty revision range'))

    first = resolved.first()
    second = resolved.last()

    if first == second and len(revs) >= 2:
        if not all(revrange(repo, [r]) for r in revs):
            raise error.Abort(_(b'empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]
728 728
729 729
def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    # Turn bare integers into revnum revsets; pass strings through as-is.
    allspecs = [
        revsetlang.formatspec(b'%d', spec) if isinstance(spec, int) else spec
        for spec in specs
    ]
    return repo.anyrevs(allspecs, user=True, localalias=localalias)
757 757
758 758
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        # A real merge: both parents matter.
        return parents
    if repo.ui.debugflag:
        # Debug output always shows both parent slots, padded with null.
        return [parents[0], repo[nullrev]]
    onlyparent = parents[0]
    if onlyparent.rev() >= intrev(ctx) - 1:
        # Immediate predecessor: not meaningful, elide it.
        return []
    return parents
774 774
775 775
def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
    """Return a function that produced paths for presenting to the user.

    The returned function takes a repo-relative path and produces a path
    that can be presented in the UI.

    Depending on the value of ui.relative-paths, either a repo-relative or
    cwd-relative path will be produced.

    legacyrelativevalue is the value to use if ui.relative-paths=legacy

    If forcerelativevalue is not None, then that value will be used regardless
    of what ui.relative-paths is set to.
    """
    if forcerelativevalue is not None:
        relative = forcerelativevalue
    else:
        config = repo.ui.config(b'ui', b'relative-paths')
        if config == b'legacy':
            relative = legacyrelativevalue
        else:
            relative = stringutil.parsebool(config)
            if relative is None:
                raise error.ConfigError(
                    _(b"ui.relative-paths is not a boolean ('%s')") % config
                )

    if relative:
        cwd = repo.getcwd()
        pathto = repo.pathto
        return lambda f: pathto(f, cwd)
    if repo.ui.configbool(b'ui', b'slash'):
        # Paths already use '/', hand them back untouched.
        return lambda f: f
    return util.localpath
811 811
812 812
def subdiruipathfn(subpath, uipathfn):
    '''Create a new uipathfn that treats the file as relative to subpath.'''

    def relativeuipathfn(f):
        return uipathfn(posixpath.join(subpath, f))

    return relativeuipathfn
816 816
817 817
def anypats(pats, opts):
    '''Checks if any patterns, including --include and --exclude were given.

    Some commands (e.g. addremove) use this condition for deciding whether to
    print absolute or relative paths.
    '''
    if pats:
        return True
    return bool(opts.get(b'include') or opts.get(b'exclude'))
825 825
826 826
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # Explicitly-kinded patterns are never glob-expanded.
            expanded.append(kindpat)
            continue
        try:
            globbed = glob.glob(pat)
        except re.error:
            globbed = [pat]
        if globbed:
            expanded.extend(globbed)
        else:
            # No match: keep the original pattern untouched.
            expanded.append(kindpat)
    return expanded
845 845
846 846
def matchandpats(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if opts is None:
        opts = {}
    if not globbed and default == b'relpath':
        pats = expandpats(pats or [])

    repo = ctx.repo()
    uipathfn = getuipathfn(repo, legacyrelativevalue=True)

    if badfn is None:
        # Default: report bad matches as warnings on the ui.
        def badfn(f, msg):
            repo.ui.warn(b"%s: %s\n" % (uipathfn(f), msg))

    m = ctx.match(
        pats,
        opts.get(b'include'),
        opts.get(b'exclude'),
        default,
        listsubrepos=opts.get(b'subrepos'),
        badfn=badfn,
    )

    if m.always():
        pats = []
    return m, pats
878 878
879 879
def match(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    '''Return a matcher that will warn about bad matches.'''
    matcher, _pats = matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)
    return matcher
885 885
886 886
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    # repo is unused but kept for interface symmetry with the other
    # match* helpers.
    return matchmod.always()
890 890
891 891
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    # Exact matchers need no pattern expansion; repo is unused but kept
    # for interface symmetry.
    return matchmod.exact(files, badfn=badfn)
895 895
896 896
def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.
    """
    if not matchmod.patkind(pat):
        # Plain path: just canonicalize it.
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    ctx = repo[rev]
    m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
    files = [f for f in ctx if m(f)]
    if len(files) != 1:
        # followlines requires the pattern to select exactly one file.
        raise error.ParseError(msg)
    return files[0]
910 910
911 911
def getorigvfs(ui, repo):
    """return a vfs suitable to save 'orig' file

    return None if no special directory is configured"""
    origbackuppath = ui.config(b'ui', b'origbackuppath')
    if origbackuppath:
        return vfs.vfs(repo.wvfs.join(origbackuppath))
    return None
920 920
921 921
def backuppath(ui, repo, filepath):
    '''customize where working copy backup files (.orig files) are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified

    filepath is repo-relative

    Returns an absolute path
    '''
    origvfs = getorigvfs(ui, repo)
    if origvfs is None:
        # No dedicated backup directory configured: back up in place.
        return repo.wjoin(filepath + b".orig")

    origbackupdir = origvfs.dirname(filepath)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        for f in reversed(list(pathutil.finddirs(filepath))):
            if origvfs.isfileorlink(f):
                ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(f))
                origvfs.unlink(f)
                # Only the innermost conflicting ancestor needs removing;
                # everything below it goes away with it.
                break

        origvfs.makedirs(origbackupdir)

    if origvfs.isdir(filepath) and not origvfs.islink(filepath):
        # A stale directory occupies the backup file's own path; clear it.
        ui.note(
            _(b'removing conflicting directory: %s\n') % origvfs.join(filepath)
        )
        origvfs.rmtree(filepath, forcibly=True)

    return origvfs.join(filepath)
956 956
957 957
958 958 class _containsnode(object):
959 959 """proxy __contains__(node) to container.__contains__ which accepts revs"""
960 960
961 961 def __init__(self, repo, revcontainer):
962 962 self._torev = repo.changelog.rev
963 963 self._revcontains = revcontainer.__contains__
964 964
965 965 def __contains__(self, node):
966 966 return self._revcontains(self._torev(node))
967 967
968 968
def cleanupnodes(
    repo,
    replacements,
    operation,
    moves=None,
    metadata=None,
    fixphase=False,
    targetphase=None,
    backup=True,
):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or a iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is dictionary containing metadata to be stored in obsmarker if
    obsolescence is enabled.
    """
    # targetphase only makes sense when we are allowed to fix phases
    assert fixphase or targetphase is None
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, b'items'):
        # a bare iterable of nodes: they are removed without successors
        replacements = {(n,): () for n in replacements}
    else:
        # upgrading non tuple "source" to tuple ones for BC
        repls = {}
        for key, value in replacements.items():
            if not isinstance(key, tuple):
                key = (key,)
            repls[key] = value
        replacements = repls

    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    for oldnodes, newnodes in replacements.items():
        for oldnode in oldnodes:
            # explicit entries in 'moves' take precedence
            if oldnode in moves:
                continue
            if len(newnodes) > 1:
                # usually a split, take the one with biggest rev number
                newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
            elif len(newnodes) == 0:
                # move bookmark backwards
                allreplaced = []
                for rep in replacements:
                    allreplaced.extend(rep)
                roots = list(
                    unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
                )
                if roots:
                    newnode = roots[0].node()
                else:
                    # no unreplaced ancestor left: park bookmark on null
                    newnode = nullid
            else:
                newnode = newnodes[0]
            moves[oldnode] = newnode

    allnewnodes = [n for ns in replacements.values() for n in ns]
    toretract = {}
    toadvance = {}
    if fixphase:
        # map each new node back to the old nodes it replaces so the new
        # phase can be derived from its precursors
        precursors = {}
        for oldnodes, newnodes in replacements.items():
            for oldnode in oldnodes:
                for newnode in newnodes:
                    precursors.setdefault(newnode, []).append(oldnode)

        # process parents before children so 'phase(parent)' below sees
        # the (possibly updated) parent phase
        allnewnodes.sort(key=lambda n: unfi[n].rev())
        newphases = {}

        def phase(ctx):
            return newphases.get(ctx.node(), ctx.phase())

        for newnode in allnewnodes:
            ctx = unfi[newnode]
            parentphase = max(phase(p) for p in ctx.parents())
            if targetphase is None:
                oldphase = max(
                    unfi[oldnode].phase() for oldnode in precursors[newnode]
                )
                newphase = max(oldphase, parentphase)
            else:
                newphase = max(targetphase, parentphase)
            newphases[newnode] = newphase
            if newphase > ctx.phase():
                toretract.setdefault(newphase, []).append(newnode)
            elif newphase < ctx.phase():
                toadvance.setdefault(newphase, []).append(newnode)

    with repo.transaction(b'cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks  # avoid import cycle

            repo.ui.debug(
                b'moving bookmarks %r from %s to %s\n'
                % (
                    pycompat.rapply(pycompat.maybebytestr, oldbmarks),
                    hex(oldnode),
                    hex(newnode),
                )
            )
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs(
                b'parents(roots(%ln & (::%n))) - parents(%n)',
                allnewnodes,
                newnode,
                oldnode,
            )
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        for phase, nodes in toretract.items():
            phases.retractboundary(repo, tr, phase, nodes)
        for phase, nodes in toadvance.items():
            phases.advanceboundary(repo, tr, phase, nodes)

        mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obssolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the node in topology order, that might be useful for
            # some obsstore logic.
            # NOTE: the sorting might belong to createmarkers.
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0][0])
            rels = []
            for ns, s in sorted(replacements.items(), key=sortfunc):
                rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
                rels.append(rel)
            if rels:
                obsolete.createmarkers(
                    repo, rels, operation=operation, metadata=metadata
                )
        elif phases.supportinternal(repo) and mayusearchived:
            # this assume we do not have "unstable" nodes above the cleaned ones
            allreplaced = set()
            for ns in replacements.keys():
                allreplaced.update(ns)
            if backup:
                from . import repair  # avoid import cycle

                node = min(allreplaced, key=repo.changelog.rev)
                repair.backupbundle(
                    repo, allreplaced, allreplaced, node, operation
                )
            phases.retractboundary(repo, tr, phases.archived, allreplaced)
        else:
            from . import repair  # avoid import cycle

            tostrip = list(n for ns in replacements for n in ns)
            if tostrip:
                repair.delayedstrip(
                    repo.ui, repo, tostrip, operation, backup=backup
                )
1150 1150
1151 1151
def addremove(repo, matcher, prefix, uipathfn, opts=None):
    """Add new files and remove missing ones, recursing into subrepos.

    Honors opts 'dry_run', 'similarity' (0-100, for rename detection) and
    'subrepos'.  Returns 1 if any path was rejected by the matcher or a
    subrepo addremove failed, else 0.
    """
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get(b'dry_run')
    try:
        similarity = float(opts.get(b'similarity') or 0)
    except ValueError:
        raise error.Abort(_(b'similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.Abort(_(b'similarity must be between 0 and 100'))
    # rename detection works with a 0.0-1.0 ratio internally
    similarity /= 100.0

    ret = 0

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        # recurse when explicitly requested, when the subrepo itself was
        # named, or when the match narrows into the subrepo
        if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = subdiruipathfn(subpath, uipathfn)
            try:
                if sub.addremove(submatch, subprefix, subuipathfn, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )

    rejected = []

    def badfn(f, msg):
        # only report explicitly-named files; always remember the rejection
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(
        repo, badmatch
    )

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        # stay quiet about files the user named explicitly (unless -v)
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _(b'adding %s\n') % uipathfn(abs)
                label = b'ui.addremove.added'
            else:
                status = _(b'removing %s\n') % uipathfn(abs)
                label = b'ui.addremove.removed'
            repo.ui.status(status, label=label)

    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret
1219 1219
1220 1220
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    rejected = []
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                msg = _(b'adding %s\n') % abs
            else:
                msg = _(b'removing %s\n') % abs
            repo.ui.status(msg)

    # TODO: We should probably have the caller pass in uipathfn and apply it to
    # the messages above too. legacyrelativevalue=True is consistent with how
    # it used to work.
    uipathfn = getuipathfn(repo, legacyrelativevalue=True)
    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    _markchanges(repo, unknown + forgotten, deleted, renames)

    if any(f in m.files() for f in rejected):
        return 1
    return 0
1254 1254
1255 1255
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    matcher = repo.narrowmatch(matcher, includeexact=True)
    walkresults = dirstate.walk(
        matcher,
        subrepos=sorted(ctx.substate),
        unknown=True,
        ignored=False,
        full=False,
    )
    # walkresults maps path -> stat result (falsy when the file is gone);
    # dirstate[path] is the single-letter state: '?' untracked, 'r' removed,
    # 'a' added (see the branches below)
    for abs, st in pycompat.iteritems(walkresults):
        dstate = dirstate[abs]
        if dstate == b'?' and audit_path.check(abs):
            # untracked and not inside a nested repo or similar trap
            unknown.append(abs)
        elif dstate != b'r' and not st:
            # tracked but missing from disk
            deleted.append(abs)
        elif dstate == b'r' and st:
            # marked removed yet present on disk
            forgotten.append(abs)
        # for finding renames
        elif dstate == b'r' and not st:
            removed.append(abs)
        elif dstate == b'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
1290 1290
1291 1291
def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
    """Find renames from removed files to added ones.

    Returns a {new: old} mapping; empty when similarity detection is
    disabled (similarity <= 0).
    """
    renames = {}
    if similarity <= 0:
        return renames
    candidates = similar.findrenames(repo, added, removed, similarity)
    for old, new, score in candidates:
        # stay quiet about pairs the user named explicitly (unless -v)
        if repo.ui.verbose or not (
            matcher.exact(old) and matcher.exact(new)
        ):
            repo.ui.status(
                _(
                    b'recording removal of %s as rename to %s '
                    b'(%d%% similar)\n'
                )
                % (uipathfn(old), uipathfn(new), score * 100)
            )
        renames[new] = old
    return renames
1313 1313
1314 1314
def _markchanges(repo, unknown, deleted, renames):
    """Record pending working-copy changes in the dirstate.

    Files in ``unknown`` are marked added, files in ``deleted`` are
    marked removed, and every ``dst -> src`` entry in ``renames`` is
    recorded as a copy.
    """
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for dst, src in pycompat.iteritems(renames):
            wctx.copy(src, dst)
1324 1324
1325 1325
def getrenamedfn(repo, endrev=None):
    # Returns a callable getrenamed(fn, rev) -> copy source (or None).
    if copiesmod.usechangesetcentricalgo(repo):
        # copy metadata is stored in the changeset itself: read it directly

        def getrenamed(fn, rev):
            ctx = repo[rev]
            p1copies = ctx.p1copies()
            if fn in p1copies:
                return p1copies[fn]
            p2copies = ctx.p2copies()
            if fn in p2copies:
                return p2copies[fn]
            return None

        return getrenamed

    # filelog-based path: lazily build a per-file cache of linkrev -> source
    rcache = {}
    if endrev is None:
        endrev = len(repo)

    def getrenamed(fn, rev):
        '''looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev.'''
        if fn not in rcache:
            rcache[fn] = {}
            fl = repo.file(fn)
            for i in fl:
                lr = fl.linkrev(i)
                renamed = fl.renamed(fl.node(i))
                rcache[fn][lr] = renamed and renamed[0]
                if lr >= endrev:
                    break
        if rev in rcache[fn]:
            return rcache[fn][rev]

        # If linkrev != rev (i.e. rev not found in rcache) fallback to
        # filectx logic.
        try:
            return repo[rev][fn].copysource()
        except error.LookupError:
            # fn does not exist in rev
            return None

    return getrenamed
1370 1370
1371 1371
def getcopiesfn(repo, endrev=None):
    """Return a callable mapping a changectx to sorted (dst, src) copies.

    With changeset-centric copy tracing the data comes straight from the
    changeset; otherwise it is derived from filelog renames through
    getrenamedfn (bounded by ``endrev``).
    """
    if copiesmod.usechangesetcentricalgo(repo):

        def copiesfn(ctx):
            secondparent = ctx.p2copies()
            if not secondparent:
                return sorted(ctx.p1copies().items())
            merged = ctx.p1copies().copy()
            # There should be no overlap
            merged.update(secondparent)
            return sorted(merged.items())

    else:
        getrenamed = getrenamedfn(repo, endrev)

        def copiesfn(ctx):
            rev = ctx.rev()
            found = []
            for fn in ctx.files():
                source = getrenamed(fn, rev)
                if source:
                    found.append((fn, source))
            return found

    return copiesfn
1396 1396
1397 1397
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    # follow the copy chain back: if src is itself a copy, the original
    # source is what matters for the "copying back" check
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc:  # copying back a copy?
        # 'm'erge/'n'ormal states need no change; anything else is
        # brought back under normal tracking
        if repo.dirstate[dst] not in b'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == b'a' and origsrc == src:
            # the source was only added in the working copy: there is no
            # committed revision to record copy metadata against
            if not ui.quiet:
                ui.warn(
                    _(
                        b"%s has not been committed yet, so no copy "
                        b"data will be stored for %s.\n"
                    )
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
                )
            # dst untracked ('?') or removed ('r'): just add it
            if repo.dirstate[dst] in b'?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
1420 1420
1421 1421
def movedirstate(repo, newctx, match=None):
    """Move the dirstate to newctx and adjust it as necessary.

    A matcher can be provided as an optimization. It is probably a bug to pass
    a matcher that doesn't match all the differences between the parent of the
    working copy and newctx.
    """
    oldctx = repo[b'.']
    ds = repo.dirstate
    ds.setparents(newctx.node(), nullid)
    # snapshot copies before the state fixups below rewrite entries
    copies = dict(ds.copies())
    s = newctx.status(oldctx, match=match)
    for f in s.modified:
        if ds[f] == b'r':
            # modified + removed -> removed
            continue
        ds.normallookup(f)

    for f in s.added:
        if ds[f] == b'r':
            # added + removed -> unknown
            ds.drop(f)
        elif ds[f] != b'a':
            ds.add(f)

    for f in s.removed:
        if ds[f] == b'a':
            # removed + added -> normal
            ds.normallookup(f)
        elif ds[f] != b'r':
            ds.remove(f)

    # Merge old parent and old working dir copies
    oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
    oldcopies.update(copies)
    # chain copies through the old set so sources point past intermediates
    copies = dict(
        (dst, oldcopies.get(src, src))
        for dst, src in pycompat.iteritems(oldcopies)
    )
    # Adjust the dirstate copies
    for dst, src in pycompat.iteritems(copies):
        # drop copy info when the source vanished, the destination already
        # exists in newctx, or the destination is no longer an add
        if src not in newctx or dst in newctx or ds[dst] != b'a':
            src = None
        ds.copy(src, dst)
1466 1466
1467 1467
def writerequires(opener, requirements):
    """Write the sorted requirement names, one per line, to 'requires'.

    The file is written atomically via the opener's atomictemp support.
    """
    with opener(b'requires', b'w', atomictemp=True) as fp:
        for requirement in sorted(requirements):
            fp.write(b"%s\n" % requirement)
1472 1472
1473 1473
class filecachesubentry(object):
    """Track the stat state of one file backing a filecache entry."""

    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        # None means "don't know yet whether this file is cacheable"
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)
            # only a successful stat can tell us whether the file is
            # cacheable; otherwise leave the question open
            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()

    def refresh(self):
        """Re-stat the file, unless it is known not to be cacheable."""
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        """Whether the file can be cached; 'unknown' counts as yes."""
        if self._cacheable is None:
            # undecided: optimistically assume cacheable for now
            return True
        return self._cacheable

    def changed(self):
        """True if the file changed since the last stat, or can't be cached."""
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        if not self._cacheable:
            return True

        if self.cachestat == newstat:
            return False
        self.cachestat = newstat
        return True

    @staticmethod
    def stat(path):
        # missing files are reported as None; other stat errors propagate
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
1528 1528
1529 1529
class filecacheentry(object):
    """Aggregate several filecachesubentry objects into one cache entry."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(p, stat) for p in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(entry.changed() for entry in self._entries)

    def refresh(self):
        for entry in self._entries:
            entry.refresh()
1546 1546
1547 1547
class filecache(object):
    """A property like decorator that tracks files under .hg/ for updates.

    On first access, the files defined as arguments are stat()ed and the
    results cached. The decorated function is called. The results are stashed
    away in a ``_filecache`` dict on the object whose method is decorated.

    On subsequent access, the cached result is used as it is set to the
    instance dictionary.

    On external property set/delete operations, the caller must update the
    corresponding _filecache entry appropriately. Use __class__.<attr>.set()
    instead of directly setting <attr>.

    When using the property API, the cached data is always used if available.
    No stat() is performed to check if the file has changed.

    Others can muck about with the state of the ``_filecache`` dict. e.g. they
    can populate an entry before the property's getter is called. In this case,
    entries in ``_filecache`` will be used during property operations,
    if available. If the underlying file changes, it is up to external callers
    to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
    method result as well as possibly calling ``del obj._filecache[attr]`` to
    remove the ``filecacheentry``.
    """

    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        # decorator protocol: remember the wrapped getter and both the
        # native-str and bytes forms of its name
        self.func = func
        self.sname = func.__name__
        self.name = pycompat.sysbytes(self.sname)
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # once computed, the value lives in obj.__dict__ and this
        # non-data descriptor is no longer consulted for this attribute
        assert self.sname not in obj.__dict__

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.sname] = entry.obj
        return entry.obj

    # don't implement __set__(), which would make __dict__ lookup as slow as
    # function call.

    def set(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value  # update cached copy
        obj.__dict__[self.sname] = value  # update copy returned by obj.x
1632 1632
1633 1633
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config(b"extdata", source)
    if not spec:
        raise error.Abort(_(b"unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith(b"shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(
                procutil.tonativestr(cmd),
                shell=True,
                bufsize=-1,
                close_fds=procutil.closefds,
                stdout=subprocess.PIPE,
                cwd=procutil.tonativestr(repo.root),
            )
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            # split "<revspec> <value>"; a record without a space has an
            # empty value
            if b" " in l:
                k, v = l.strip().split(b" ", 1)
            else:
                k, v = l.strip(), b""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass  # we ignore data for nodes that don't exist locally
    finally:
        # always reap the child and close the stream, even on parse errors
        if proc:
            try:
                proc.communicate()
            except ValueError:
                # This happens if we started iterating src and then
                # get a parse error on a line. It should be safe to ignore.
                pass
        if src:
            src.close()
    if proc and proc.returncode != 0:
        raise error.Abort(
            _(b"extdata command '%s' failed: %s")
            % (cmd, procutil.explainexit(proc.returncode))
        )

    return data
1700 1700
1701 1701
1702 1702 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1703 1703 if lock is None:
1704 1704 raise error.LockInheritanceContractViolation(
1705 1705 b'lock can only be inherited while held'
1706 1706 )
1707 1707 if environ is None:
1708 1708 environ = {}
1709 1709 with lock.inherit() as locker:
1710 1710 environ[envvar] = locker
1711 1711 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1712 1712
1713 1713
def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    wlock = repo.currentwlock()
    return _locksub(repo, wlock, b'HG_WLOCK_LOCKER', cmd, *args, **kwargs)
1723 1723
1724 1724
class progress(object):
    """Helper that feeds a progress bar and optional debug output.

    ``updatebar`` is a callable ``(topic, pos, item, unit, total)`` used
    to render the bar.  ``complete()`` (or leaving the context manager)
    signals the end of the operation by reporting ``pos=None``.
    """

    def __init__(self, ui, updatebar, topic, unit=b"", total=None):
        self.ui = ui
        self.pos = 0
        self.topic = topic
        self.unit = unit
        self.total = total
        # mirror each update as a ui.debug() line when configured
        self.debug = ui.configbool(b'progress', b'debug')
        self._updatebar = updatebar

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.complete()

    def update(self, pos, item=b"", total=None):
        """Move the bar to ``pos``; ``total`` may be (re)supplied lazily."""
        assert pos is not None
        if total:
            self.total = total
        self.pos = pos
        self._updatebar(self.topic, self.pos, item, self.unit, self.total)
        if self.debug:
            self._printdebug(item)

    def increment(self, step=1, item=b"", total=None):
        self.update(self.pos + step, item, total)

    def complete(self):
        self.pos = None
        self.unit = b""
        self.total = None
        self._updatebar(self.topic, self.pos, b"", self.unit, self.total)

    def _printdebug(self, item):
        # initialize so an empty self.unit (the default) does not leave
        # 'unit' unbound below, which raised UnboundLocalError before
        unit = b''
        if self.unit:
            unit = b' ' + self.unit
        if item:
            item = b' ' + item

        if self.total:
            pct = 100.0 * self.pos / self.total
            self.ui.debug(
                b'%s:%s %d/%d%s (%4.2f%%)\n'
                % (self.topic, item, self.pos, self.total, unit, pct)
            )
        else:
            self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit))
1773 1773
1774 1774
def gdinitconfig(ui):
    """Tell whether a newly created repo should use general delta."""
    # experimental config: format.generaldelta
    if ui.configbool(b'format', b'generaldelta'):
        return True
    return ui.configbool(b'format', b'usegeneraldelta')
1782 1782
1783 1783
def gddeltaconfig(ui):
    """Tell whether incoming deltas should be optimised for general delta."""
    # experimental config: format.generaldelta
    enabled = ui.configbool(b'format', b'generaldelta')
    return enabled
1789 1789
1790 1790
class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""

    firstlinekey = b'__firstline'

    def __init__(self, vfs, path, keys=None):
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Parse the file into a dict of key -> value.

        When ``firstlinenonkeyval`` is True, the first line is not
        parsed as a key=value pair but is returned verbatim (minus its
        newline) under the ``__firstline`` key."""
        lines = self.vfs.readlines(self.path)
        result = {}
        if firstlinenonkeyval:
            if not lines:
                raise error.CorruptedState(_(b"empty simplekeyvalue file"))
            # drop the trailing '\n' from the recorded first line
            result[self.firstlinekey] = lines.pop(0)[:-1]

        try:
            # skip lines that are pure whitespace; a line without '=' makes
            # the dict() constructor raise ValueError
            parsed = dict(
                entry[:-1].split(b'=', 1) for entry in lines if entry.strip()
            )
        except ValueError as e:
            raise error.CorruptedState(stringutil.forcebytestr(e))
        if self.firstlinekey in parsed:
            raise error.CorruptedState(
                _(b"%r can't be used as a key") % self.firstlinekey
            )
        result.update(parsed)
        return result

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file
        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to file before
        everything else, as it is, not in a key=value form"""
        lines = []
        if firstline is not None:
            lines.append(b'%s\n' % firstline)

        for key, value in data.items():
            if key == self.firstlinekey:
                raise error.ProgrammingError(
                    b"key name '%s' is reserved" % self.firstlinekey
                )
            if not key[0:1].isalpha():
                raise error.ProgrammingError(
                    b"keys must start with a letter in a key-value file"
                )
            if not key.isalnum():
                raise error.ProgrammingError(
                    b"invalid key name in a simple key-value file"
                )
            if b'\n' in value:
                raise error.ProgrammingError(
                    b"invalid value in a simple key-value file"
                )
            lines.append(b"%s=%s\n" % (key, value))
        with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp:
            fp.write(b''.join(lines))
1861 1861
1862 1862
# Transaction names (prefixes) for which obsolescence-marker activity is
# summarized when the transaction closes; see registersummarycallback().
_reportobsoletedsource = [
    b'debugobsolete',
    b'pull',
    b'push',
    b'serve',
    b'unbundle',
]

# Transaction names (prefixes) for which newly added changesets and phase
# changes are summarized when the transaction closes.
_reportnewcssource = [
    b'pull',
    b'unbundle',
]
1875 1875
1876 1876
def prefetchfiles(repo, revs, match):
    """Run every registered file-prefetch hook for ``revs``/``match``.

    This lets extensions ensure the corresponding files are available
    locally before the command uses them."""
    if not match:
        match = matchall(repo)
    else:
        # The command itself will complain about files that don't exist, so
        # don't duplicate the message.
        match = matchmod.badmatch(match, lambda fn, msg: None)

    fileprefetchhooks(repo, revs, match)
1889 1889
1890 1890
# a list of (repo, revs, match) prefetch functions, invoked by prefetchfiles()
fileprefetchhooks = util.hooks()

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True
1896 1896
1897 1897
def registersummarycallback(repo, otr, txnname=b''):
    """register a callback to issue a summary after the transaction is closed

    Which summaries are registered depends on ``txnname``: changegroup
    counts always; obsolescence/instability reports for names matching
    _reportobsoletedsource; new-changeset and phase-change reports for
    names matching _reportnewcssource.
    """

    def txmatch(sources):
        # True when this transaction's name starts with any given source
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used leading to troubles. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())

        def wrapped(tr):
            repo = reporef()
            if filtername:
                assert repo is not None  # help pytype
                repo = repo.filtered(filtername)
            func(repo, tr)

        # categories are numbered so callbacks run in registration order
        newcat = b'%02i-txnreport' % len(categories)
        otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    @reportsummary
    def reportchangegroup(repo, tr):
        # summarize changegroup counts accumulated in tr.changes
        cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
        cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
        cgfiles = tr.changes.get(b'changegroup-count-files', 0)
        cgheads = tr.changes.get(b'changegroup-count-heads', 0)
        if cgchangesets or cgrevisions or cgfiles:
            htext = b""
            if cgheads:
                htext = _(b" (%+d heads)") % cgheads
            msg = _(b"added %d changesets with %d changes to %d files%s\n")
            assert repo is not None  # help pytype
            repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))

    if txmatch(_reportobsoletedsource):

        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            newmarkers = len(tr.changes.get(b'obsmarkers', ()))
            if newmarkers:
                repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
            if obsoleted:
                repo.ui.status(_(b'obsoleted %i changesets\n') % len(obsoleted))

        if obsolete.isenabled(
            repo, obsolete.createmarkersopt
        ) and repo.ui.configbool(
            b'experimental', b'evolution.report-instabilities'
        ):
            # (user-facing name, revset name) pairs
            instabilitytypes = [
                (b'orphan', b'orphan'),
                (b'phase-divergent', b'phasedivergent'),
                (b'content-divergent', b'contentdivergent'),
            ]

            def getinstabilitycounts(repo):
                filtered = repo.changelog.filteredrevs
                counts = {}
                for instability, revset in instabilitytypes:
                    counts[instability] = len(
                        set(obsolete.getrevs(repo, revset)) - filtered
                    )
                return counts

            # snapshot taken before the transaction runs, compared after close
            oldinstabilitycounts = getinstabilitycounts(repo)

            @reportsummary
            def reportnewinstabilities(repo, tr):
                newinstabilitycounts = getinstabilitycounts(repo)
                for instability, revset in instabilitytypes:
                    delta = (
                        newinstabilitycounts[instability]
                        - oldinstabilitycounts[instability]
                    )
                    msg = getinstabilitymessage(delta, instability)
                    if msg:
                        repo.ui.warn(msg)

    if txmatch(_reportnewcssource):

        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            unfi = repo.unfiltered()
            if origrepolen >= len(unfi):
                return

            # Compute the bounds of new visible revisions' range.
            revs = smartset.spanset(repo, start=origrepolen)
            if revs:
                minrev, maxrev = repo[revs.min()], repo[revs.max()]

                if minrev == maxrev:
                    revrange = minrev
                else:
                    revrange = b'%s:%s' % (minrev, maxrev)
                draft = len(repo.revs(b'%ld and draft()', revs))
                secret = len(repo.revs(b'%ld and secret()', revs))
                if not (draft or secret):
                    msg = _(b'new changesets %s\n') % revrange
                elif draft and secret:
                    msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
                    msg %= (revrange, draft, secret)
                elif draft:
                    msg = _(b'new changesets %s (%d drafts)\n')
                    msg %= (revrange, draft)
                elif secret:
                    msg = _(b'new changesets %s (%d secrets)\n')
                    msg %= (revrange, secret)
                else:
                    errormsg = b'entered unreachable condition'
                    raise error.ProgrammingError(errormsg)
                repo.ui.status(msg)

            # search new changesets directly pulled as obsolete
            duplicates = tr.changes.get(b'revduplicates', ())
            obsadded = unfi.revs(
                b'(%d: + %ld) and obsolete()', origrepolen, duplicates
            )
            cl = repo.changelog
            extinctadded = [r for r in obsadded if r not in cl]
            if extinctadded:
                # They are not just obsolete, but obsolete and invisible
                # we call them "extinct" internally but the terms have not been
                # exposed to users.
                msg = b'(%d other changesets obsolete on arrival)\n'
                repo.ui.status(msg % len(extinctadded))

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            pull/unbundle.
            """
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            phasetracking = tr.changes.get(b'phases', {})
            if not phasetracking:
                return
            # only count pre-existing changesets that became public
            published = [
                rev
                for rev, (old, new) in pycompat.iteritems(phasetracking)
                if new == phases.public and rev < origrepolen
            ]
            if not published:
                return
            repo.ui.status(
                _(b'%d local changesets published\n') % len(published)
            )
2058 2058
2059 2059
def getinstabilitymessage(delta, instability):
    """Return a warning about ``delta`` new ``instability`` changesets.

    Returns None when ``delta`` is not positive. Kept as a standalone
    function so that extensions can wrap it to show more information,
    like how to fix instabilities."""
    if delta <= 0:
        return None
    return _(b'%i new %s changesets\n') % (delta, instability)
2067 2067
2068 2068
def nodesummaries(repo, nodes, maxnumnodes=4):
    """Return a one-line summary of ``nodes`` as short hashes.

    At most ``maxnumnodes`` hashes are listed, followed by a count of the
    remainder; in verbose mode every node is shown."""
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return b' '.join(short(n) for n in nodes)
    shown = b' '.join(short(n) for n in nodes[:maxnumnodes])
    return _(b"%s and %d others") % (shown, len(nodes) - maxnumnodes)
2074 2074
2075 2075
def enforcesinglehead(repo, tr, desc, accountclosed=False):
    """Abort if any named branch has more than one visible head.

    ``accountclosed`` makes closed heads count as well. The check is
    skipped entirely for strip/repair transactions."""
    if desc in (b'strip', b'repair'):
        # skip the logic during strip
        return
    branchmap = repo.filtered(b'visible').branchmap()
    # possible improvement: we could restrict the check to affected branch
    for branch in branchmap:
        heads = branchmap.branchheads(branch, closed=accountclosed)
        if len(heads) <= 1:
            continue
        msg = _(b'rejecting multiple heads on branch "%s"') % branch
        hint = _(b'%d heads: %s') % (len(heads), nodesummaries(repo, heads))
        raise error.Abort(msg, hint=hint)
2092 2092
2093 2093
def wrapconvertsink(sink):
    """Hook point for extensions to wrap the sink returned by
    convcmd.convertsink() before it is used, whether or not the convert
    extension was formally loaded."""
    # the default implementation is the identity; extensions replace it
    return sink
2099 2099
2100 2100
def unhidehashlikerevs(repo, specs, hiddentype):
    """Return a repo where hash/revnum-like symbols in ``specs`` are unhidden.

    ``hiddentype`` selects what happens when hidden changesets get pinned:
    b'warn' emits a warning, b'nowarn' stays silent.

    When direct access is disabled, or nothing needs unhiding, the original
    repo object is returned unchanged."""
    # direct access must be explicitly enabled, and only applies to repos
    # that actually filter out hidden revisions
    if not repo.filtername or not repo.ui.configbool(
        b'experimental', b'directaccess'
    ):
        return repo

    if repo.filtername not in (b'visible', b'visible-hidden'):
        return repo

    # collect every hash-like symbol appearing in the parsed revsets
    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError:  # will be reported by scmutil.revrange()
            continue
        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)
    if not revs:
        return repo

    if hiddentype == b'warn':
        unfi = repo.unfiltered()
        revstr = b", ".join([pycompat.bytestr(unfi[r]) for r in revs])
        repo.ui.warn(
            _(
                b"warning: accessing hidden changesets for write "
                b"operation: %s\n"
            )
            % revstr
        )

    # we have to use a new filtername to separate branch/tags caches until
    # we can disable those caches when revisions are dynamically pinned
    return repo.filtered(b'visible-hidden', revs)
2149 2149
2150 2150
def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and returns a set of revision numbers of hidden
    changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    # revision numbers only count as direct access when this knob is set
    allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
    for s in symbols:
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    # integer-looking symbols are ignored unless allowed
                    continue
                else:
                    if n not in cl:
                        # valid revnum missing from the filtered changelog,
                        # i.e. hidden: unhide it
                        revs.add(n)
                        continue
                # visible revnum: fall through and also try the symbol as a
                # hex nodeid prefix below
        except ValueError:
            # not an integer: treat it purely as a potential nodeid prefix
            pass

        # resolve against the unfiltered repo so hidden nodes are found too
        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                # the node resolves to a hidden revision: unhide it
                revs.add(rev)

    return revs
2184 2184
2185 2185
def bookmarkrevs(repo, mark):
    """Select the revisions reachable from bookmark ``mark`` but not from
    other bookmarks or from non-bookmarked heads."""
    revspec = (
        b"ancestors(bookmark(%s)) - "
        b"ancestors(head() and not bookmark(%s)) - "
        b"ancestors(bookmark() and not bookmark(%s))"
    )
    return repo.revs(revspec, mark, mark, mark)
General Comments 0
You need to be logged in to leave comments. Login now