scmutil: document that bookmarkrevs() ignores non-head bookmark branch...
Yuya Nishihara
r46654:b9ebe0bf default
@@ -1,2313 +1,2314 @@
# scmutil.py - Mercurial core utility functions
#
#  Copyright Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import glob
import os
import posixpath
import re
import subprocess
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
    wdirid,
    wdirrev,
)
from .pycompat import getattr
from .thirdparty import attr
from . import (
    copies as copiesmod,
    encoding,
    error,
    match as matchmod,
    obsolete,
    obsutil,
    pathutil,
    phases,
    policy,
    pycompat,
    requirements as requirementsmod,
    revsetlang,
    similar,
    smartset,
    url,
    util,
    vfs,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
)

if pycompat.iswindows:
    from . import scmwindows as scmplatform
else:
    from . import scmposix as scmplatform

parsers = policy.importmod('parsers')
rustrevlog = policy.importrust('revlog')

termsize = scmplatform.termsize


@attr.s(slots=True, repr=False)
class status(object):
    """Struct with a list of files per status.

    The 'deleted', 'unknown' and 'ignored' properties are only
    relevant to the working copy.
    """

    modified = attr.ib(default=attr.Factory(list))
    added = attr.ib(default=attr.Factory(list))
    removed = attr.ib(default=attr.Factory(list))
    deleted = attr.ib(default=attr.Factory(list))
    unknown = attr.ib(default=attr.Factory(list))
    ignored = attr.ib(default=attr.Factory(list))
    clean = attr.ib(default=attr.Factory(list))

    def __iter__(self):
        yield self.modified
        yield self.added
        yield self.removed
        yield self.deleted
        yield self.unknown
        yield self.ignored
        yield self.clean

    def __repr__(self):
        return (
            r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
            r'unknown=%s, ignored=%s, clean=%s>'
        ) % tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)
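
# Illustrative sketch (not an API contract): ``status`` yields its seven
# lists in a fixed order, so callers can unpack an instance directly:
#
#   st = status(modified=[b'a'], added=[b'b'])
#   modified, added, removed, deleted, unknown, ignored, clean = st
#   assert modified == [b'a'] and added == [b'b']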


def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = set()

    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    for subpath, ctx in sorted(pycompat.iteritems(subpaths)):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)


def nochangesfound(ui, repo, excluded=None):
    """Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    """
    secretlist = []
    if excluded:
        for n in excluded:
            ctx = repo[n]
            if ctx.phase() >= phases.secret and not ctx.extinct():
                secretlist.append(n)

    if secretlist:
        ui.status(
            _(b"no changes found (ignored %d secret changesets)\n")
            % len(secretlist)
        )
    else:
        ui.status(_(b"no changes found\n"))


def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.
    """
    coarse_exit_code = -1
    detailed_exit_code = -1
    try:
        try:
            return func()
        except:  # re-raises
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        detailed_exit_code = 20
        if inst.errno == errno.ETIMEDOUT:
            reason = _(b'timed out waiting for lock held by %r') % (
                pycompat.bytestr(inst.locker)
            )
        else:
            reason = _(b'lock held by %r') % inst.locker
        ui.error(
            _(b"abort: %s: %s\n")
            % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
        )
        if not inst.locker:
            ui.error(_(b"(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        detailed_exit_code = 20
        ui.error(
            _(b"abort: could not lock %s: %s\n")
            % (
                inst.desc or stringutil.forcebytestr(inst.filename),
                encoding.strtolocal(inst.strerror),
            )
        )
    except error.OutOfBandError as inst:
        detailed_exit_code = 100
        if inst.args:
            msg = _(b"abort: remote error:\n")
        else:
            msg = _(b"abort: remote error\n")
        ui.error(msg)
        if inst.args:
            ui.error(b''.join(inst.args))
        if inst.hint:
            ui.error(b'(%s)\n' % inst.hint)
    except error.RepoError as inst:
        ui.error(_(b"abort: %s\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.error(_(b"abort: %s") % inst.args[0])
        msg = inst.args[1]
        if isinstance(msg, type(u'')):
            msg = pycompat.sysbytes(msg)
        if not isinstance(msg, bytes):
            ui.error(b" %r\n" % (msg,))
        elif not msg:
            ui.error(_(b" empty string\n"))
        else:
            ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
    except error.CensoredNodeError as inst:
        ui.error(_(b"abort: file censored %s\n") % inst)
    except error.StorageError as inst:
        ui.error(_(b"abort: %s\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
    except error.InterventionRequired as inst:
        ui.error(b"%s\n" % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
        detailed_exit_code = 240
        coarse_exit_code = 1
    except error.WdirUnsupported:
        ui.error(_(b"abort: working directory revision cannot be specified\n"))
    except error.Abort as inst:
        if isinstance(inst, (error.InputError, error.ParseError)):
            detailed_exit_code = 10
        elif isinstance(inst, error.StateError):
            detailed_exit_code = 20
        elif isinstance(inst, error.ConfigError):
            detailed_exit_code = 30
        elif isinstance(inst, error.SecurityError):
            detailed_exit_code = 150
        elif isinstance(inst, error.CanceledError):
            detailed_exit_code = 250
        ui.error(inst.format())
    except error.WorkerError as inst:
        # Don't print a message -- the worker already should have
        return inst.status_code
    except ImportError as inst:
        ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
        m = stringutil.forcebytestr(inst).split()[-1]
        if m in b"mpatch bdiff".split():
            ui.error(_(b"(did you forget to compile extensions?)\n"))
        elif m in b"zlib".split():
            ui.error(_(b"(is your Python install correct?)\n"))
    except util.urlerr.httperror as inst:
        detailed_exit_code = 100
        ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
    except util.urlerr.urlerror as inst:
        detailed_exit_code = 100
        try:  # usually it is in the form (errno, strerror)
            reason = inst.reason.args[1]
        except (AttributeError, IndexError):
            # it might be anything, for example a string
            reason = inst.reason
        if isinstance(reason, pycompat.unicode):
            # SSLError of Python 2.7.9 contains a unicode
            reason = encoding.unitolocal(reason)
        ui.error(_(b"abort: error: %s\n") % stringutil.forcebytestr(reason))
    except (IOError, OSError) as inst:
        if (
            util.safehasattr(inst, b"args")
            and inst.args
            and inst.args[0] == errno.EPIPE
        ):
            pass
        elif getattr(inst, "strerror", None):  # common IOError or OSError
            if getattr(inst, "filename", None) is not None:
                ui.error(
                    _(b"abort: %s: '%s'\n")
                    % (
                        encoding.strtolocal(inst.strerror),
                        stringutil.forcebytestr(inst.filename),
                    )
                )
            else:
                ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
        else:  # suspicious IOError
            raise
    except MemoryError:
        ui.error(_(b"abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case, catch this and pass the exit code to the caller.
        detailed_exit_code = 254
        coarse_exit_code = inst.code

    if ui.configbool(b'ui', b'detailed-exit-code'):
        return detailed_exit_code
    else:
        return coarse_exit_code
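
# Hedged usage sketch: dispatch-style callers wrap their entry point so
# that any exception becomes an exit code (the command body below is
# hypothetical):
#
#   def _run():
#       ...  # may raise error.Abort, OSError, etc.
#   ret = callcatch(ui, _run)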


def checknewlabel(repo, lbl, kind):
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in [b'tip', b'.', b'null']:
        raise error.InputError(_(b"the name '%s' is reserved") % lbl)
    for c in (b':', b'\0', b'\n', b'\r'):
        if c in lbl:
            raise error.InputError(
                _(b"%r cannot be used in a name") % pycompat.bytestr(c)
            )
    try:
        int(lbl)
        raise error.InputError(_(b"cannot use an integer as a name"))
    except ValueError:
        pass
    if lbl.strip() != lbl:
        raise error.InputError(
            _(b"leading or trailing whitespace in name %r") % lbl
        )
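
# For illustration: checknewlabel(repo, b'tip', b'bookmark') raises
# InputError because b'tip' is reserved; b'my:mark' fails on the b':',
# b'123' is rejected as a pure integer, and b' x' fails the strip() check.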


def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if b'\r' in f or b'\n' in f:
        raise error.InputError(
            _(b"'\\n' and '\\r' disallowed in filenames: %r")
            % pycompat.bytestr(f)
        )


def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if abort or warn:
        msg = util.checkwinfilename(f)
        if msg:
            msg = b"%s: %s" % (msg, procutil.shellquote(f))
            if abort:
                raise error.InputError(msg)
            ui.warn(_(b"warning: %s\n") % msg)


def checkportabilityalert(ui):
    """check if the user's config requests nothing, a warning, or abort for
    non-portable filenames"""
    val = ui.config(b'ui', b'portablefilenames')
    lval = val.lower()
    bval = stringutil.parsebool(val)
    abort = pycompat.iswindows or lval == b'abort'
    warn = bval or lval == b'warn'
    if bval is None and not (warn or abort or lval == b'ignore'):
        raise error.ConfigError(
            _(b"ui.portablefilenames value is invalid ('%s')") % val
        )
    return abort, warn
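
# Hedged sketch of the resulting (abort, warn) pairs on a non-Windows
# platform (Windows always aborts):
#
#   ui.portablefilenames = ignore -> (False, False)
#   ui.portablefilenames = warn   -> (False, True)
#   ui.portablefilenames = abort  -> (True, False)
#   ui.portablefilenames = true   -> (False, True)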


class casecollisionauditor(object):
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        allfiles = b'\0'.join(dirstate)
        self._loweredfiles = set(encoding.lower(allfiles).split(b'\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _(b'possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_(b"warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)


def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    key = cl._filteredrevs_hashcache.get(maxrev)
    if not key:
        revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
        if revs:
            s = hashutil.sha1()
            for rev in revs:
                s.update(b'%d;' % rev)
            key = s.digest()
            cl._filteredrevs_hashcache[maxrev] = key
    return key
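
# Hedged sketch: cache consumers typically record (tiprev, tipnode,
# filteredhash) together and treat the cache as valid only while all
# three still match the repository:
#
#   cachekey = (cl.tiprev(), cl.tip(), filteredhash(repo, cl.tiprev()))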


def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    """yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs"""

    def errhandler(err):
        if err.filename == path:
            raise err

    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:

        def adddir(dirlst, dirname):
            dirstat = os.stat(dirname)
            match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
            if not match:
                dirlst.append(dirstat)
            return not match

    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if b'.hg' in dirs:
            yield root  # found a repository
            qroot = os.path.join(root, b'.hg', b'patches')
            if os.path.isdir(os.path.join(qroot, b'.hg')):
                yield qroot  # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove(b'.hg')
            else:
                dirs[:] = []  # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
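
# Usage sketch (the root path is hypothetical): enumerate repositories
# below a directory, following symlinks but not descending into working
# copies:
#
#   for repopath in walkrepos(b'/srv/hg', followsym=True):
#       ui.write(b'%s\n' % repopath)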


def binnode(ctx):
    """Return binary node id for a given basectx"""
    node = ctx.node()
    if node is None:
        return wdirid
    return node


def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    rev = ctx.rev()
    if rev is None:
        return wdirrev
    return rev


def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    repo = ctx.repo()
    return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))


def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    if ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    return b'%d:%s' % (rev, hexfunc(node))
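
# e.g. formatrevnode(ui, 0, node) normally yields b'0:' followed by the
# 12-digit short hash, and the full 40-digit hex under --debug.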


def resolvehexnodeidprefix(repo, prefix):
    if prefix.startswith(b'x'):
        prefix = prefix[1:]
    try:
        # Uses unfiltered repo because it's faster when prefix is ambiguous.
        # This matches the shortesthexnodeidprefix() function below.
        node = repo.unfiltered().changelog._partialmatch(prefix)
    except error.AmbiguousPrefixLookupError:
        revset = repo.ui.config(
            b'experimental', b'revisions.disambiguatewithin'
        )
        if revset:
            # Clear config to avoid infinite recursion
            configoverrides = {
                (b'experimental', b'revisions.disambiguatewithin'): None
            }
            with repo.ui.configoverride(configoverrides):
                revs = repo.anyrevs([revset], user=True)
                matches = []
                for rev in revs:
                    node = repo.changelog.node(rev)
                    if hex(node).startswith(prefix):
                        matches.append(node)
                if len(matches) == 1:
                    return matches[0]
        raise
    if node is None:
        return
    repo.changelog.rev(node)  # make sure node isn't filtered
    return node


def mayberevnum(repo, prefix):
    """Checks if the given prefix may be mistaken for a revision number"""
    try:
        i = int(prefix)
        # if we are a pure int, then starting with zero will not be
        # confused as a rev; or, obviously, if the int is larger
        # than the value of the tip rev. We still need to disambiguate if
        # prefix == '0', since that *is* a valid revnum.
        if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
            return False
        return True
    except ValueError:
        return False
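
# Illustrative truth table for a repo with 100 revisions:
#   b'0'   -> True   (a valid revnum, so it needs disambiguation)
#   b'07'  -> False  (a leading zero is never a revnum)
#   b'99'  -> True
#   b'123' -> False  (beyond the tip rev)
#   b'ab'  -> False  (not an integer at all)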


def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
    """Find the shortest unambiguous prefix that matches hexnode.

    If "cache" is not None, it must be a dictionary that can be used for
    caching between calls to this method.
    """
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow, so we look for hash collisions in
    # unfiltered space, which means some hashes may be slightly longer.

    minlength = max(minlength, 1)

    def disambiguate(prefix):
        """Disambiguate against revnums."""
        if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'):
            if mayberevnum(repo, prefix):
                return b'x' + prefix
            else:
                return prefix

        hexnode = hex(node)
        for length in range(len(prefix), len(hexnode) + 1):
            prefix = hexnode[:length]
            if not mayberevnum(repo, prefix):
                return prefix

    cl = repo.unfiltered().changelog
    revset = repo.ui.config(b'experimental', b'revisions.disambiguatewithin')
    if revset:
        revs = None
        if cache is not None:
            revs = cache.get(b'disambiguationrevset')
        if revs is None:
            revs = repo.anyrevs([revset], user=True)
            if cache is not None:
                cache[b'disambiguationrevset'] = revs
        if cl.rev(node) in revs:
            hexnode = hex(node)
            nodetree = None
            if cache is not None:
                nodetree = cache.get(b'disambiguationnodetree')
            if not nodetree:
                if util.safehasattr(parsers, 'nodetree'):
                    # The CExt is the only implementation to provide a nodetree
                    # class so far.
                    index = cl.index
                    if util.safehasattr(index, 'get_cindex'):
                        # the rust wrapper needs to give access to its internal index
                        index = index.get_cindex()
                    nodetree = parsers.nodetree(index, len(revs))
                    for r in revs:
                        nodetree.insert(r)
                    if cache is not None:
                        cache[b'disambiguationnodetree'] = nodetree
            if nodetree is not None:
                length = max(nodetree.shortest(node), minlength)
                prefix = hexnode[:length]
                return disambiguate(prefix)
            for length in range(minlength, len(hexnode) + 1):
                matches = []
                prefix = hexnode[:length]
                for rev in revs:
                    otherhexnode = repo[rev].hex()
                    if prefix == otherhexnode[:length]:
                        matches.append(otherhexnode)
                if len(matches) == 1:
                    return disambiguate(prefix)

    try:
        return disambiguate(cl.shortest(node, minlength))
    except error.LookupError:
        raise error.RepoLookupError()


def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
    symbol is an ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
        return True
    except error.RepoLookupError:
        return False


def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        msg = (
            b"symbol (%s of type %s) was not a string, did you mean "
            b"repo[symbol]?" % (symbol, type(symbol))
        )
        raise error.ProgrammingError(msg)
    try:
        if symbol in (b'.', b'tip', b'null'):
            return repo[symbol]

        try:
            r = int(symbol)
            if b'%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        if len(symbol) == 40:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_(b"unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        return repo[None]
    except (
        error.FilteredIndexError,
        error.FilteredLookupError,
        error.FilteredRepoLookupError,
    ):
        raise _filterederror(repo, symbol)
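
# Hedged examples: revsymbol(repo, b'.') is the working-copy parent,
# revsymbol(repo, b'-1') counts back from the tip, and a 40-digit hex
# string or bookmark name resolves through the branches above; a revset
# such as b'max(public())' is not accepted (use revrange() for that).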


def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith(b'visible'):

        # Check if the changeset is obsolete
        unfilteredrepo = repo.unfiltered()
        ctx = revsymbol(unfilteredrepo, changeid)

        # If the changeset is obsolete, enrich the message with the reason
        # that made this changeset not visible
        if ctx.obsolete():
            msg = obsutil._getfilteredreason(repo, changeid, ctx)
        else:
            msg = _(b"hidden revision '%s'") % changeid

        hint = _(b'use --hidden to access hidden revisions')

        return error.FilteredRepoLookupError(msg, hint=hint)
    msg = _(b"filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)


def revsingle(repo, revspec, default=b'.', localalias=None):
    if not revspec and revspec != 0:
        return repo[default]

    l = revrange(repo, [revspec], localalias=localalias)
    if not l:
        raise error.Abort(_(b'empty revision set'))
    return repo[l.last()]


def _pairspec(revspec):
    tree = revsetlang.parse(revspec)
    return tree and tree[0] in (
        b'range',
        b'rangepre',
        b'rangepost',
        b'rangeall',
    )


def revpair(repo, revs):
    if not revs:
        return repo[b'.'], repo[None]

    l = revrange(repo, revs)

    if not l:
        raise error.Abort(_(b'empty revision range'))

    first = l.first()
    second = l.last()

    if (
        first == second
        and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)
    ):
        raise error.Abort(_(b'empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]


def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``smartset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    allspecs = []
    for spec in specs:
        if isinstance(spec, int):
            spec = revsetlang.formatspec(b'%d', spec)
        allspecs.append(spec)
    return repo.anyrevs(allspecs, user=True, localalias=localalias)
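
# Usage sketch: combine a pre-formatted spec with a plain one; the result
# is the union of both revsets:
#
#   spec = revsetlang.formatspec(b'%d::', 42)
#   revs = revrange(repo, [spec, b'draft()'])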


def increasingwindows(windowsize=8, sizelimit=512):
    while True:
        yield windowsize
        if windowsize < sizelimit:
            windowsize *= 2
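
# The generator doubles until it reaches sizelimit and then repeats the
# cap forever:
#
#   gen = increasingwindows()
#   [next(gen) for _ in range(8)]  # [8, 16, 32, 64, 128, 256, 512, 512]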


def walkchangerevs(repo, revs, makefilematcher, prepare):
    """Iterate over files and the revs in a "windowed" way.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order."""

    if not revs:
        return []
    change = repo.__getitem__

    def iterate():
        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            for i in pycompat.xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                nrevs.append(rev)
            for rev in sorted(nrevs):
                ctx = change(rev)
                prepare(ctx, makefilematcher(ctx))
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()
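
# Hedged usage sketch (revs descending, as 'hg log' walks them; prepare
# and the matcher factory are caller-supplied):
#
#   def prepare(ctx, fns):
#       pass  # gather per-window data here
#   for ctx in walkchangerevs(repo, revs, makefilematcher, prepare):
#       ui.write(b'%d\n' % ctx.rev())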


def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        return [parents[0], repo[nullrev]]
    if parents[0].rev() >= intrev(ctx) - 1:
        return []
    return parents


def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
855 855 """Return a function that produced paths for presenting to the user.

    The returned function takes a repo-relative path and produces a path
    that can be presented in the UI.

    Depending on the value of ui.relative-paths, either a repo-relative or
    cwd-relative path will be produced.

    legacyrelativevalue is the value to use if ui.relative-paths=legacy

    If forcerelativevalue is not None, then that value will be used regardless
    of what ui.relative-paths is set to.
    """
    if forcerelativevalue is not None:
        relative = forcerelativevalue
    else:
        config = repo.ui.config(b'ui', b'relative-paths')
        if config == b'legacy':
            relative = legacyrelativevalue
        else:
            relative = stringutil.parsebool(config)
            if relative is None:
                raise error.ConfigError(
                    _(b"ui.relative-paths is not a boolean ('%s')") % config
                )

    if relative:
        cwd = repo.getcwd()
        if cwd != b'':
            # this branch would work even if cwd == b'' (ie cwd = repo
            # root), but its generality makes the returned function slower
            pathto = repo.pathto
            return lambda f: pathto(f, cwd)
    if repo.ui.configbool(b'ui', b'slash'):
        return lambda f: f
    else:
        return util.localpath
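
# e.g. with relative paths in effect and cwd == b'sub', the repo-relative
# b'sub/file.txt' is presented as b'file.txt'; otherwise paths stay
# repo-relative (with OS separators unless ui.slash is set).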


def subdiruipathfn(subpath, uipathfn):
    '''Create a new uipathfn that treats the file as relative to subpath.'''
    return lambda f: uipathfn(posixpath.join(subpath, f))


def anypats(pats, opts):
    """Checks if any patterns, including --include and --exclude were given.

    Some commands (e.g. addremove) use this condition for deciding whether to
    print absolute or relative paths.
    """
    return bool(pats or opts.get(b'include') or opts.get(b'exclude'))


def expandpats(pats):
909 909 """Expand bare globs when running on windows.
910 910 On posix we assume it already has already been done by sh."""
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            try:
                globbed = glob.glob(pat)
            except re.error:
                globbed = [pat]
            if globbed:
                ret.extend(globbed)
                continue
        ret.append(kindpat)
    return ret


def matchandpats(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    """Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided."""
    if opts is None:
        opts = {}
    if not globbed and default == b'relpath':
        pats = expandpats(pats or [])

    uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)

    def bad(f, msg):
        ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(
        pats,
        opts.get(b'include'),
        opts.get(b'exclude'),
        default,
        listsubrepos=opts.get(b'subrepos'),
        badfn=badfn,
    )

    if m.always():
        pats = []
    return m, pats


def match(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    '''Return a matcher that will warn about bad matches.'''
    return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]


def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always()


def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(files, badfn=badfn)


def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.
    """
    if not matchmod.patkind(pat):
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    else:
        ctx = repo[rev]
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
        files = [f for f in ctx if m(f)]
        if len(files) != 1:
            raise error.ParseError(msg)
        return files[0]


def getorigvfs(ui, repo):
    """return a vfs suitable to save 'orig' file

    return None if no special directory is configured"""
    origbackuppath = ui.config(b'ui', b'origbackuppath')
    if not origbackuppath:
        return None
    return vfs.vfs(repo.wvfs.join(origbackuppath))


def backuppath(ui, repo, filepath):
    """customize where working copy backup files (.orig files) are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified

    filepath is repo-relative

    Returns an absolute path
    """
    origvfs = getorigvfs(ui, repo)
    if origvfs is None:
        return repo.wjoin(filepath + b".orig")

    origbackupdir = origvfs.dirname(filepath)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        for f in reversed(list(pathutil.finddirs(filepath))):
            if origvfs.isfileorlink(f):
                ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    if origvfs.isdir(filepath) and not origvfs.islink(filepath):
        ui.note(
            _(b'removing conflicting directory: %s\n') % origvfs.join(filepath)
        )
        origvfs.rmtree(filepath, forcibly=True)

    return origvfs.join(filepath)
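
# Illustrative behaviour (paths hypothetical):
#   origbackuppath unset:
#     backuppath(ui, repo, b'foo/bar') -> b'<root>/foo/bar.orig'
#   [ui] origbackuppath = .origbackups:
#     backuppath(ui, repo, b'foo/bar') -> b'<root>/.origbackups/foo/bar'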


class _containsnode(object):
    """proxy __contains__(node) to container.__contains__ which accepts revs"""

    def __init__(self, repo, revcontainer):
        self._torev = repo.changelog.rev
        self._revcontains = revcontainer.__contains__

    def __contains__(self, node):
        return self._revcontains(self._torev(node))


def cleanupnodes(
    repo,
    replacements,
    operation,
    moves=None,
    metadata=None,
    fixphase=False,
    targetphase=None,
    backup=True,
):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.
    replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is a dictionary containing metadata to be stored in obsmarkers if
    obsolescence is enabled.
1074 1074 """
1075 1075 assert fixphase or targetphase is None
1076 1076 if not replacements and not moves:
1077 1077 return
1078 1078
1079 1079 # translate mapping's other forms
1080 1080 if not util.safehasattr(replacements, b'items'):
1081 1081 replacements = {(n,): () for n in replacements}
1082 1082 else:
1083 1083 # upgrading non tuple "source" to tuple ones for BC
1084 1084 repls = {}
1085 1085 for key, value in replacements.items():
1086 1086 if not isinstance(key, tuple):
1087 1087 key = (key,)
1088 1088 repls[key] = value
1089 1089 replacements = repls
1090 1090
1091 1091 # Unfiltered repo is needed since nodes in replacements might be hidden.
1092 1092 unfi = repo.unfiltered()
1093 1093
1094 1094 # Calculate bookmark movements
1095 1095 if moves is None:
1096 1096 moves = {}
1097 1097 for oldnodes, newnodes in replacements.items():
1098 1098 for oldnode in oldnodes:
1099 1099 if oldnode in moves:
1100 1100 continue
1101 1101 if len(newnodes) > 1:
1102 1102 # usually a split, take the one with biggest rev number
1103 1103 newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
1104 1104 elif len(newnodes) == 0:
1105 1105 # move bookmark backwards
1106 1106 allreplaced = []
1107 1107 for rep in replacements:
1108 1108 allreplaced.extend(rep)
1109 1109 roots = list(
1110 1110 unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
1111 1111 )
1112 1112 if roots:
1113 1113 newnode = roots[0].node()
1114 1114 else:
1115 1115 newnode = nullid
1116 1116 else:
1117 1117 newnode = newnodes[0]
1118 1118 moves[oldnode] = newnode
1119 1119
1120 1120 allnewnodes = [n for ns in replacements.values() for n in ns]
1121 1121 toretract = {}
1122 1122 toadvance = {}
1123 1123 if fixphase:
1124 1124 precursors = {}
1125 1125 for oldnodes, newnodes in replacements.items():
1126 1126 for oldnode in oldnodes:
1127 1127 for newnode in newnodes:
1128 1128 precursors.setdefault(newnode, []).append(oldnode)
1129 1129
1130 1130 allnewnodes.sort(key=lambda n: unfi[n].rev())
1131 1131 newphases = {}
1132 1132
1133 1133 def phase(ctx):
1134 1134 return newphases.get(ctx.node(), ctx.phase())
1135 1135
1136 1136 for newnode in allnewnodes:
1137 1137 ctx = unfi[newnode]
1138 1138 parentphase = max(phase(p) for p in ctx.parents())
1139 1139 if targetphase is None:
1140 1140 oldphase = max(
1141 1141 unfi[oldnode].phase() for oldnode in precursors[newnode]
1142 1142 )
1143 1143 newphase = max(oldphase, parentphase)
1144 1144 else:
1145 1145 newphase = max(targetphase, parentphase)
1146 1146 newphases[newnode] = newphase
1147 1147 if newphase > ctx.phase():
1148 1148 toretract.setdefault(newphase, []).append(newnode)
1149 1149 elif newphase < ctx.phase():
1150 1150 toadvance.setdefault(newphase, []).append(newnode)
1151 1151
1152 1152 with repo.transaction(b'cleanup') as tr:
1153 1153 # Move bookmarks
1154 1154 bmarks = repo._bookmarks
1155 1155 bmarkchanges = []
1156 1156 for oldnode, newnode in moves.items():
1157 1157 oldbmarks = repo.nodebookmarks(oldnode)
1158 1158 if not oldbmarks:
1159 1159 continue
1160 1160 from . import bookmarks # avoid import cycle
1161 1161
1162 1162 repo.ui.debug(
1163 1163 b'moving bookmarks %r from %s to %s\n'
1164 1164 % (
1165 1165 pycompat.rapply(pycompat.maybebytestr, oldbmarks),
1166 1166 hex(oldnode),
1167 1167 hex(newnode),
1168 1168 )
1169 1169 )
1170 1170 # Delete divergent bookmarks being parents of related newnodes
1171 1171 deleterevs = repo.revs(
1172 1172 b'parents(roots(%ln & (::%n))) - parents(%n)',
1173 1173 allnewnodes,
1174 1174 newnode,
1175 1175 oldnode,
1176 1176 )
1177 1177 deletenodes = _containsnode(repo, deleterevs)
1178 1178 for name in oldbmarks:
1179 1179 bmarkchanges.append((name, newnode))
1180 1180 for b in bookmarks.divergent2delete(repo, deletenodes, name):
1181 1181 bmarkchanges.append((b, None))
1182 1182
1183 1183 if bmarkchanges:
1184 1184 bmarks.applychanges(repo, tr, bmarkchanges)
1185 1185
1186 1186 for phase, nodes in toretract.items():
1187 1187 phases.retractboundary(repo, tr, phase, nodes)
1188 1188 for phase, nodes in toadvance.items():
1189 1189 phases.advanceboundary(repo, tr, phase, nodes)
1190 1190
1191 1191 mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
1192 1192 # Obsolete or strip nodes
1193 1193 if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obsolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the nodes in topological order, which might be useful
            # for some obsstore logic.
            # NOTE: the sorting might belong to createmarkers.
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0][0])
            rels = []
            for ns, s in sorted(replacements.items(), key=sortfunc):
                rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
                rels.append(rel)
            if rels:
                obsolete.createmarkers(
                    repo, rels, operation=operation, metadata=metadata
                )
        elif phases.supportinternal(repo) and mayusearchived:
            # this assumes we do not have "unstable" nodes above the cleaned ones
            allreplaced = set()
            for ns in replacements.keys():
                allreplaced.update(ns)
            if backup:
                from . import repair  # avoid import cycle

                node = min(allreplaced, key=repo.changelog.rev)
                repair.backupbundle(
                    repo, allreplaced, allreplaced, node, operation
                )
            phases.retractboundary(repo, tr, phases.archived, allreplaced)
        else:
            from . import repair  # avoid import cycle

            tostrip = list(n for ns in replacements for n in ns)
            if tostrip:
                repair.delayedstrip(
                    repo.ui, repo, tostrip, operation, backup=backup
                )
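
# Hedged usage sketch (nodes hypothetical): after a history rewrite that
# replaced old1 with new1 and dropped old2 entirely, a caller would run:
#
#   cleanupnodes(repo, {old1: [new1], old2: []}, b'myoperation')
#
# which writes obsmarkers (or strips, or archives) and moves bookmarks.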


def addremove(repo, matcher, prefix, uipathfn, opts=None):
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get(b'dry_run')
    try:
        similarity = float(opts.get(b'similarity') or 0)
    except ValueError:
        raise error.Abort(_(b'similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.Abort(_(b'similarity must be between 0 and 100'))
    similarity /= 100.0

    ret = 0

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = subdiruipathfn(subpath, uipathfn)
            try:
                if sub.addremove(submatch, subprefix, subuipathfn, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )

    rejected = []

    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(
        repo, badmatch
    )

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _(b'adding %s\n') % uipathfn(abs)
                label = b'ui.addremove.added'
            else:
                status = _(b'removing %s\n') % uipathfn(abs)
                label = b'ui.addremove.removed'
            repo.ui.status(status, label=label)

    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret


def marktouched(repo, files, similarity=0.0):
    """Assert that files have somehow been operated upon. files are relative to
    the repo root."""
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _(b'adding %s\n') % abs
            else:
                status = _(b'removing %s\n') % abs
            repo.ui.status(status)

    # TODO: We should probably have the caller pass in uipathfn and apply it to
    # the messages above too. legacyrelativevalue=True is consistent with how
    # it used to work.
    uipathfn = getuipathfn(repo, legacyrelativevalue=True)
    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0


def _interestingfiles(repo, matcher):
    """Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean."""
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    matcher = repo.narrowmatch(matcher, includeexact=True)
    walkresults = dirstate.walk(
        matcher,
        subrepos=sorted(ctx.substate),
        unknown=True,
        ignored=False,
        full=False,
    )
    for abs, st in pycompat.iteritems(walkresults):
        dstate = dirstate[abs]
        if dstate == b'?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != b'r' and not st:
            deleted.append(abs)
        elif dstate == b'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == b'r' and not st:
            removed.append(abs)
        elif dstate == b'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten


def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(
            repo, added, removed, similarity
        ):
            if (
                repo.ui.verbose
                or not matcher.exact(old)
                or not matcher.exact(new)
            ):
                repo.ui.status(
                    _(
                        b'recording removal of %s as rename to %s '
                        b'(%d%% similar)\n'
                    )
                    % (uipathfn(old), uipathfn(new), score * 100)
                )
            renames[new] = old
    return renames


def _markchanges(repo, unknown, deleted, renames):
    """Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied."""
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in pycompat.iteritems(renames):
            wctx.copy(old, new)


def getrenamedfn(repo, endrev=None):
    if copiesmod.usechangesetcentricalgo(repo):

        def getrenamed(fn, rev):
            ctx = repo[rev]
            p1copies = ctx.p1copies()
            if fn in p1copies:
                return p1copies[fn]
            p2copies = ctx.p2copies()
            if fn in p2copies:
                return p2copies[fn]
            return None

        return getrenamed

    rcache = {}
    if endrev is None:
        endrev = len(repo)

    def getrenamed(fn, rev):
        """looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev."""
        if fn not in rcache:
            rcache[fn] = {}
            fl = repo.file(fn)
            for i in fl:
                lr = fl.linkrev(i)
                renamed = fl.renamed(fl.node(i))
                rcache[fn][lr] = renamed and renamed[0]
                if lr >= endrev:
                    break
        if rev in rcache[fn]:
            return rcache[fn][rev]

        # If linkrev != rev (i.e. rev not found in rcache) fallback to
        # filectx logic.
        try:
            return repo[rev][fn].copysource()
        except error.LookupError:
            return None

    return getrenamed


def getcopiesfn(repo, endrev=None):
    if copiesmod.usechangesetcentricalgo(repo):

        def copiesfn(ctx):
            if ctx.p2copies():
                allcopies = ctx.p1copies().copy()
                # There should be no overlap
                allcopies.update(ctx.p2copies())
                return sorted(allcopies.items())
            else:
                return sorted(ctx.p1copies().items())

    else:
        getrenamed = getrenamedfn(repo, endrev)

        def copiesfn(ctx):
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename))
            return copies

    return copiesfn


def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc:  # copying back a copy?
        if repo.dirstate[dst] not in b'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == b'a' and origsrc == src:
            if not ui.quiet:
                ui.warn(
                    _(
                        b"%s has not been committed yet, so no copy "
                        b"data will be stored for %s.\n"
                    )
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
                )
            if repo.dirstate[dst] in b'?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)


def movedirstate(repo, newctx, match=None):
    """Move the dirstate to newctx and adjust it as necessary.

    A matcher can be provided as an optimization. It is probably a bug to pass
    a matcher that doesn't match all the differences between the parent of the
    working copy and newctx.
    """
    oldctx = repo[b'.']
    ds = repo.dirstate
    copies = dict(ds.copies())
    ds.setparents(newctx.node(), nullid)
    s = newctx.status(oldctx, match=match)
    for f in s.modified:
        if ds[f] == b'r':
            # modified + removed -> removed
            continue
        ds.normallookup(f)

    for f in s.added:
        if ds[f] == b'r':
            # added + removed -> unknown
            ds.drop(f)
        elif ds[f] != b'a':
            ds.add(f)

    for f in s.removed:
        if ds[f] == b'a':
            # removed + added -> normal
            ds.normallookup(f)
        elif ds[f] != b'r':
            ds.remove(f)

    # Merge old parent and old working dir copies
    oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
    oldcopies.update(copies)
    copies = {
        dst: oldcopies.get(src, src)
        for dst, src in pycompat.iteritems(oldcopies)
    }
    # Adjust the dirstate copies
    for dst, src in pycompat.iteritems(copies):
        if src not in newctx or dst in newctx or ds[dst] != b'a':
            src = None
        ds.copy(src, dst)
    repo._quick_access_changeid_invalidate()


def filterrequirements(requirements):
    """filters the requirements into two sets:

    wcreq: requirements which should be written in .hg/requires
    storereq: which should be written in .hg/store/requires

    Returns (wcreq, storereq)
    """
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
        wc, store = set(), set()
        for r in requirements:
            if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
                wc.add(r)
            else:
                store.add(r)
        return wc, store
    return requirements, None
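
# Hedged sketch: with share-safe enabled, working-directory requirements
# stay in .hg/requires while the rest go to .hg/store/requires:
#
#   wcreq, storereq = filterrequirements(repo.requirements)
#   # storereq is None when the share-safe requirement is absent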
1567 1567
1568 1568
1569 1569 def istreemanifest(repo):
1570 1570 """ returns whether the repository is using treemanifest or not """
1571 1571 return requirementsmod.TREEMANIFEST_REQUIREMENT in repo.requirements
1572 1572
1573 1573
1574 1574 def writereporequirements(repo, requirements=None):
1575 1575 """ writes requirements for the repo to .hg/requires """
1576 1576 if requirements:
1577 1577 repo.requirements = requirements
1578 1578 wcreq, storereq = filterrequirements(repo.requirements)
1579 1579 if wcreq is not None:
1580 1580 writerequires(repo.vfs, wcreq)
1581 1581 if storereq is not None:
1582 1582 writerequires(repo.svfs, storereq)
1583 1583 elif repo.ui.configbool(b'format', b'usestore'):
1584 1584 # only remove store requires if we are using store
1585 1585 repo.svfs.tryunlink(b'requires')
1586 1586
1587 1587
1588 1588 def writerequires(opener, requirements):
1589 1589 with opener(b'requires', b'w', atomictemp=True) as fp:
1590 1590 for r in sorted(requirements):
1591 1591 fp.write(b"%s\n" % r)
1592 1592
1593 1593
1594 1594 class filecachesubentry(object):
1595 1595 def __init__(self, path, stat):
1596 1596 self.path = path
1597 1597 self.cachestat = None
1598 1598 self._cacheable = None
1599 1599
1600 1600 if stat:
1601 1601 self.cachestat = filecachesubentry.stat(self.path)
1602 1602
1603 1603 if self.cachestat:
1604 1604 self._cacheable = self.cachestat.cacheable()
1605 1605 else:
1606 1606 # None means we don't know yet
1607 1607 self._cacheable = None
1608 1608
1609 1609 def refresh(self):
1610 1610 if self.cacheable():
1611 1611 self.cachestat = filecachesubentry.stat(self.path)
1612 1612
1613 1613 def cacheable(self):
1614 1614 if self._cacheable is not None:
1615 1615 return self._cacheable
1616 1616
1617 1617 # we don't know yet, assume it is for now
1618 1618 return True
1619 1619
1620 1620 def changed(self):
1621 1621 # no point in going further if we can't cache it
1622 1622 if not self.cacheable():
1623 1623 return True
1624 1624
1625 1625 newstat = filecachesubentry.stat(self.path)
1626 1626
1627 1627 # we may not know if it's cacheable yet, check again now
1628 1628 if newstat and self._cacheable is None:
1629 1629 self._cacheable = newstat.cacheable()
1630 1630
1631 1631 # check again
1632 1632 if not self._cacheable:
1633 1633 return True
1634 1634
1635 1635 if self.cachestat != newstat:
1636 1636 self.cachestat = newstat
1637 1637 return True
1638 1638 else:
1639 1639 return False
1640 1640
1641 1641 @staticmethod
1642 1642 def stat(path):
1643 1643 try:
1644 1644 return util.cachestat(path)
1645 1645 except OSError as e:
1646 1646 if e.errno != errno.ENOENT:
1647 1647 raise
1648 1648
1649 1649
1650 1650 class filecacheentry(object):
1651 1651 def __init__(self, paths, stat=True):
1652 1652 self._entries = []
1653 1653 for path in paths:
1654 1654 self._entries.append(filecachesubentry(path, stat))
1655 1655
1656 1656 def changed(self):
1657 1657 '''true if any entry has changed'''
1658 1658 for entry in self._entries:
1659 1659 if entry.changed():
1660 1660 return True
1661 1661 return False
1662 1662
1663 1663 def refresh(self):
1664 1664 for entry in self._entries:
1665 1665 entry.refresh()
1666 1666
1667 1667
1668 1668 class filecache(object):
1669 1669 """A property like decorator that tracks files under .hg/ for updates.
1670 1670
1671 1671 On first access, the files defined as arguments are stat()ed and the
1672 1672 results cached. The decorated function is called. The results are stashed
1673 1673 away in a ``_filecache`` dict on the object whose method is decorated.
1674 1674
1675 1675 On subsequent access, the cached result is used directly, since it
1676 1676 has been stored in the instance dictionary.
1677 1677
1678 1678 On external property set/delete operations, the caller must update the
1679 1679 corresponding _filecache entry appropriately. Use __class__.<attr>.set()
1680 1680 instead of directly setting <attr>.
1681 1681
1682 1682 When using the property API, the cached data is always used if available.
1683 1683 No stat() is performed to check if the file has changed.
1684 1684
1685 1685 Others can muck about with the state of the ``_filecache`` dict, e.g. they
1686 1686 can populate an entry before the property's getter is called. In this case,
1687 1687 entries in ``_filecache`` will be used during property operations,
1688 1688 if available. If the underlying file changes, it is up to external callers
1689 1689 to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
1690 1690 method result as well as possibly calling ``del obj._filecache[attr]`` to
1691 1691 remove the ``filecacheentry``.
1692 1692 """
1693 1693
1694 1694 def __init__(self, *paths):
1695 1695 self.paths = paths
1696 1696
1697 1697 def join(self, obj, fname):
1698 1698 """Used to compute the runtime path of a cached file.
1699 1699
1700 1700 Users should subclass filecache and provide their own version of this
1701 1701 function to call the appropriate join function on 'obj' (an instance
1702 1702 of the class whose member function was decorated).
1703 1703 """
1704 1704 raise NotImplementedError
1705 1705
1706 1706 def __call__(self, func):
1707 1707 self.func = func
1708 1708 self.sname = func.__name__
1709 1709 self.name = pycompat.sysbytes(self.sname)
1710 1710 return self
1711 1711
1712 1712 def __get__(self, obj, type=None):
1713 1713 # if accessed on the class, return the descriptor itself.
1714 1714 if obj is None:
1715 1715 return self
1716 1716
1717 1717 assert self.sname not in obj.__dict__
1718 1718
1719 1719 entry = obj._filecache.get(self.name)
1720 1720
1721 1721 if entry:
1722 1722 if entry.changed():
1723 1723 entry.obj = self.func(obj)
1724 1724 else:
1725 1725 paths = [self.join(obj, path) for path in self.paths]
1726 1726
1727 1727 # We stat -before- creating the object so our cache doesn't lie if
1728 1728 # a writer modifies the file between the time we stat and the read
1729 1729 entry = filecacheentry(paths, True)
1730 1730 entry.obj = self.func(obj)
1731 1731
1732 1732 obj._filecache[self.name] = entry
1733 1733
1734 1734 obj.__dict__[self.sname] = entry.obj
1735 1735 return entry.obj
1736 1736
1737 1737 # don't implement __set__(), which would make __dict__ lookup as slow as
1738 1738 # function call.
1739 1739
1740 1740 def set(self, obj, value):
1741 1741 if self.name not in obj._filecache:
1742 1742 # we add an entry for the missing value because X in __dict__
1743 1743 # implies X in _filecache
1744 1744 paths = [self.join(obj, path) for path in self.paths]
1745 1745 ce = filecacheentry(paths, False)
1746 1746 obj._filecache[self.name] = ce
1747 1747 else:
1748 1748 ce = obj._filecache[self.name]
1749 1749
1750 1750 ce.obj = value # update cached copy
1751 1751 obj.__dict__[self.sname] = value # update copy returned by obj.x
1752 1752
1753 1753
1754 1754 def extdatasource(repo, source):
1755 1755 """Gather a map of rev -> value dict from the specified source
1756 1756
1757 1757 A source spec is treated as a URL, with a special case shell: type
1758 1758 for parsing the output from a shell command.
1759 1759
1760 1760 The data is parsed as a series of newline-separated records where
1761 1761 each record is a revision specifier optionally followed by a space
1762 1762 and a freeform string value. If the revision is known locally, it
1763 1763 is converted to a rev, otherwise the record is skipped.
1764 1764
1765 1765 Note that both key and value are treated as UTF-8 and converted to
1766 1766 the local encoding. This allows uniformity between local and
1767 1767 remote data sources.
1768 1768 """
1769 1769
1770 1770 spec = repo.ui.config(b"extdata", source)
1771 1771 if not spec:
1772 1772 raise error.Abort(_(b"unknown extdata source '%s'") % source)
1773 1773
1774 1774 data = {}
1775 1775 src = proc = None
1776 1776 try:
1777 1777 if spec.startswith(b"shell:"):
1778 1778 # external commands should be run relative to the repo root
1779 1779 cmd = spec[6:]
1780 1780 proc = subprocess.Popen(
1781 1781 procutil.tonativestr(cmd),
1782 1782 shell=True,
1783 1783 bufsize=-1,
1784 1784 close_fds=procutil.closefds,
1785 1785 stdout=subprocess.PIPE,
1786 1786 cwd=procutil.tonativestr(repo.root),
1787 1787 )
1788 1788 src = proc.stdout
1789 1789 else:
1790 1790 # treat as a URL or file
1791 1791 src = url.open(repo.ui, spec)
1792 1792 for l in src:
1793 1793 if b" " in l:
1794 1794 k, v = l.strip().split(b" ", 1)
1795 1795 else:
1796 1796 k, v = l.strip(), b""
1797 1797
1798 1798 k = encoding.tolocal(k)
1799 1799 try:
1800 1800 data[revsingle(repo, k).rev()] = encoding.tolocal(v)
1801 1801 except (error.LookupError, error.RepoLookupError):
1802 1802 pass # we ignore data for nodes that don't exist locally
1803 1803 finally:
1804 1804 if proc:
1805 1805 try:
1806 1806 proc.communicate()
1807 1807 except ValueError:
1808 1808 # This happens if we started iterating src and then
1809 1809 # get a parse error on a line. It should be safe to ignore.
1810 1810 pass
1811 1811 if src:
1812 1812 src.close()
1813 1813 if proc and proc.returncode != 0:
1814 1814 raise error.Abort(
1815 1815 _(b"extdata command '%s' failed: %s")
1816 1816 % (cmd, procutil.explainexit(proc.returncode))
1817 1817 )
1818 1818
1819 1819 return data
1820 1820
1821 1821
1822 1822 class progress(object):
1823 1823 def __init__(self, ui, updatebar, topic, unit=b"", total=None):
1824 1824 self.ui = ui
1825 1825 self.pos = 0
1826 1826 self.topic = topic
1827 1827 self.unit = unit
1828 1828 self.total = total
1829 1829 self.debug = ui.configbool(b'progress', b'debug')
1830 1830 self._updatebar = updatebar
1831 1831
1832 1832 def __enter__(self):
1833 1833 return self
1834 1834
1835 1835 def __exit__(self, exc_type, exc_value, exc_tb):
1836 1836 self.complete()
1837 1837
1838 1838 def update(self, pos, item=b"", total=None):
1839 1839 assert pos is not None
1840 1840 if total:
1841 1841 self.total = total
1842 1842 self.pos = pos
1843 1843 self._updatebar(self.topic, self.pos, item, self.unit, self.total)
1844 1844 if self.debug:
1845 1845 self._printdebug(item)
1846 1846
1847 1847 def increment(self, step=1, item=b"", total=None):
1848 1848 self.update(self.pos + step, item, total)
1849 1849
1850 1850 def complete(self):
1851 1851 self.pos = None
1852 1852 self.unit = b""
1853 1853 self.total = None
1854 1854 self._updatebar(self.topic, self.pos, b"", self.unit, self.total)
1855 1855
1856 1856 def _printdebug(self, item):
1857 1857 unit = b''
1858 1858 if self.unit:
1859 1859 unit = b' ' + self.unit
1860 1860 if item:
1861 1861 item = b' ' + item
1862 1862
1863 1863 if self.total:
1864 1864 pct = 100.0 * self.pos / self.total
1865 1865 self.ui.debug(
1866 1866 b'%s:%s %d/%d%s (%4.2f%%)\n'
1867 1867 % (self.topic, item, self.pos, self.total, unit, pct)
1868 1868 )
1869 1869 else:
1870 1870 self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit))
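# For illustration, with progress.debug enabled, update(3, item=b'manifests',
# total=10) on a bar created with topic=b'bundling' and unit=b'chunks'
# produces the debug line:
#
#   bundling: manifests 3/10 chunks (30.00%)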
1871 1871
1872 1872
1873 1873 def gdinitconfig(ui):
1874 1874 """helper function to know if a repo should be created as general delta"""
1875 1875 # experimental config: format.generaldelta
1876 1876 return ui.configbool(b'format', b'generaldelta') or ui.configbool(
1877 1877 b'format', b'usegeneraldelta'
1878 1878 )
1879 1879
1880 1880
1881 1881 def gddeltaconfig(ui):
1882 1882 """helper function to know if incoming delta should be optimised"""
1883 1883 # experimental config: format.generaldelta
1884 1884 return ui.configbool(b'format', b'generaldelta')
1885 1885
1886 1886
1887 1887 class simplekeyvaluefile(object):
1888 1888 """A simple file with key=value lines
1889 1889
1890 1890 Keys must be alphanumeric and start with a letter; values must not
1891 1891 contain '\n' characters"""
1892 1892
1893 1893 firstlinekey = b'__firstline'
1894 1894
1895 1895 def __init__(self, vfs, path, keys=None):
1896 1896 self.vfs = vfs
1897 1897 self.path = path
1898 1898
1899 1899 def read(self, firstlinenonkeyval=False):
1900 1900 """Read the contents of a simple key-value file
1901 1901
1902 1902 'firstlinenonkeyval' indicates whether the first line of the file should
1903 1903 be treated as a key-value pair or returned in full under the
1904 1904 __firstline key."""
1905 1905 lines = self.vfs.readlines(self.path)
1906 1906 d = {}
1907 1907 if firstlinenonkeyval:
1908 1908 if not lines:
1909 1909 e = _(b"empty simplekeyvalue file")
1910 1910 raise error.CorruptedState(e)
1911 1911 # we don't want to include '\n' in the __firstline
1912 1912 d[self.firstlinekey] = lines[0][:-1]
1913 1913 del lines[0]
1914 1914
1915 1915 try:
1916 1916 # the 'if line.strip()' part prevents us from failing on empty
1917 1917 # lines which only contain '\n' and therefore are not skipped
1918 1918 # by 'if line'
1919 1919 updatedict = dict(
1920 1920 line[:-1].split(b'=', 1) for line in lines if line.strip()
1921 1921 )
1922 1922 if self.firstlinekey in updatedict:
1923 1923 e = _(b"%r can't be used as a key")
1924 1924 raise error.CorruptedState(e % self.firstlinekey)
1925 1925 d.update(updatedict)
1926 1926 except ValueError as e:
1927 1927 raise error.CorruptedState(stringutil.forcebytestr(e))
1928 1928 return d
1929 1929
1930 1930 def write(self, data, firstline=None):
1931 1931 """Write key=>value mapping to a file
1932 1932 data is a dict. Keys must be alphanumeric and start with a letter.
1933 1933 Values must not contain newline characters.
1934 1934
1935 1935 If 'firstline' is not None, it is written to file before
1936 1936 everything else, as it is, not in a key=value form"""
1937 1937 lines = []
1938 1938 if firstline is not None:
1939 1939 lines.append(b'%s\n' % firstline)
1940 1940
1941 1941 for k, v in data.items():
1942 1942 if k == self.firstlinekey:
1943 1943 e = b"key name '%s' is reserved" % self.firstlinekey
1944 1944 raise error.ProgrammingError(e)
1945 1945 if not k[0:1].isalpha():
1946 1946 e = b"keys must start with a letter in a key-value file"
1947 1947 raise error.ProgrammingError(e)
1948 1948 if not k.isalnum():
1949 1949 e = b"invalid key name in a simple key-value file"
1950 1950 raise error.ProgrammingError(e)
1951 1951 if b'\n' in v:
1952 1952 e = b"invalid value in a simple key-value file"
1953 1953 raise error.ProgrammingError(e)
1954 1954 lines.append(b"%s=%s\n" % (k, v))
1955 1955 with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp:
1956 1956 fp.write(b''.join(lines))
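# For illustration, write({b'version': b'1', b'state': b'done'},
# firstline=b'!') produces a file whose first line is "!", followed by one
# key=value pair per line (in dict iteration order):
#
#   !
#   version=1
#   state=done
#
# read(firstlinenonkeyval=True) on that file returns the same mapping plus
# {b'__firstline': b'!'}.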
1957 1957
1958 1958
1959 1959 _reportobsoletedsource = [
1960 1960 b'debugobsolete',
1961 1961 b'pull',
1962 1962 b'push',
1963 1963 b'serve',
1964 1964 b'unbundle',
1965 1965 ]
1966 1966
1967 1967 _reportnewcssource = [
1968 1968 b'pull',
1969 1969 b'unbundle',
1970 1970 ]
1971 1971
1972 1972
1973 1973 def prefetchfiles(repo, revmatches):
1974 1974 """Invokes the registered file prefetch functions, allowing extensions to
1975 1975 ensure the corresponding files are available locally, before the command
1976 1976 uses them.
1977 1977
1978 1978 Args:
1979 1979 revmatches: a list of (revision, match) tuples to indicate the files to
1980 1980 fetch at each revision. If any of the match elements is None, it matches
1981 1981 all files.
1982 1982 """
1983 1983
1984 1984 def _matcher(m):
1985 1985 if m:
1986 1986 assert isinstance(m, matchmod.basematcher)
1987 1987 # The command itself will complain about files that don't exist, so
1988 1988 # don't duplicate the message.
1989 1989 return matchmod.badmatch(m, lambda fn, msg: None)
1990 1990 else:
1991 1991 return matchall(repo)
1992 1992
1993 1993 revbadmatches = [(rev, _matcher(match)) for (rev, match) in revmatches]
1994 1994
1995 1995 fileprefetchhooks(repo, revbadmatches)
1996 1996
1997 1997
1998 1998 # a list of (repo, revs, match) prefetch functions
1999 1999 fileprefetchhooks = util.hooks()
2000 2000
2001 2001 # A marker that tells the evolve extension to suppress its own reporting
2002 2002 _reportstroubledchangesets = True
2003 2003
2004 2004
2005 2005 def registersummarycallback(repo, otr, txnname=b'', as_validator=False):
2006 2006 """register a callback to issue a summary after the transaction is closed
2007 2007
2008 2008 If as_validator is true, then the callbacks are registered as transaction
2009 2009 validators instead
2010 2010 """
2011 2011
2012 2012 def txmatch(sources):
2013 2013 return any(txnname.startswith(source) for source in sources)
2014 2014
2015 2015 categories = []
2016 2016
2017 2017 def reportsummary(func):
2018 2018 """decorator for report callbacks."""
2019 2019 # The repoview life cycle is shorter than the one of the actual
2020 2020 # underlying repository. So the filtered object can die before the
2021 2021 # weakref is used leading to troubles. We keep a reference to the
2022 2022 # unfiltered object and restore the filtering when retrieving the
2023 2023 # repository through the weakref.
2024 2024 filtername = repo.filtername
2025 2025 reporef = weakref.ref(repo.unfiltered())
2026 2026
2027 2027 def wrapped(tr):
2028 2028 repo = reporef()
2029 2029 if filtername:
2030 2030 assert repo is not None # help pytype
2031 2031 repo = repo.filtered(filtername)
2032 2032 func(repo, tr)
2033 2033
2034 2034 newcat = b'%02i-txnreport' % len(categories)
2035 2035 if as_validator:
2036 2036 otr.addvalidator(newcat, wrapped)
2037 2037 else:
2038 2038 otr.addpostclose(newcat, wrapped)
2039 2039 categories.append(newcat)
2040 2040 return wrapped
2041 2041
2042 2042 @reportsummary
2043 2043 def reportchangegroup(repo, tr):
2044 2044 cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
2045 2045 cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
2046 2046 cgfiles = tr.changes.get(b'changegroup-count-files', 0)
2047 2047 cgheads = tr.changes.get(b'changegroup-count-heads', 0)
2048 2048 if cgchangesets or cgrevisions or cgfiles:
2049 2049 htext = b""
2050 2050 if cgheads:
2051 2051 htext = _(b" (%+d heads)") % cgheads
2052 2052 msg = _(b"added %d changesets with %d changes to %d files%s\n")
2053 2053 if as_validator:
2054 2054 msg = _(b"adding %d changesets with %d changes to %d files%s\n")
2055 2055 assert repo is not None # help pytype
2056 2056 repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))
2057 2057
2058 2058 if txmatch(_reportobsoletedsource):
2059 2059
2060 2060 @reportsummary
2061 2061 def reportobsoleted(repo, tr):
2062 2062 obsoleted = obsutil.getobsoleted(repo, tr)
2063 2063 newmarkers = len(tr.changes.get(b'obsmarkers', ()))
2064 2064 if newmarkers:
2065 2065 repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
2066 2066 if obsoleted:
2067 2067 msg = _(b'obsoleted %i changesets\n')
2068 2068 if as_validator:
2069 2069 msg = _(b'obsoleting %i changesets\n')
2070 2070 repo.ui.status(msg % len(obsoleted))
2071 2071
2072 2072 if obsolete.isenabled(
2073 2073 repo, obsolete.createmarkersopt
2074 2074 ) and repo.ui.configbool(
2075 2075 b'experimental', b'evolution.report-instabilities'
2076 2076 ):
2077 2077 instabilitytypes = [
2078 2078 (b'orphan', b'orphan'),
2079 2079 (b'phase-divergent', b'phasedivergent'),
2080 2080 (b'content-divergent', b'contentdivergent'),
2081 2081 ]
2082 2082
2083 2083 def getinstabilitycounts(repo):
2084 2084 filtered = repo.changelog.filteredrevs
2085 2085 counts = {}
2086 2086 for instability, revset in instabilitytypes:
2087 2087 counts[instability] = len(
2088 2088 set(obsolete.getrevs(repo, revset)) - filtered
2089 2089 )
2090 2090 return counts
2091 2091
2092 2092 oldinstabilitycounts = getinstabilitycounts(repo)
2093 2093
2094 2094 @reportsummary
2095 2095 def reportnewinstabilities(repo, tr):
2096 2096 newinstabilitycounts = getinstabilitycounts(repo)
2097 2097 for instability, revset in instabilitytypes:
2098 2098 delta = (
2099 2099 newinstabilitycounts[instability]
2100 2100 - oldinstabilitycounts[instability]
2101 2101 )
2102 2102 msg = getinstabilitymessage(delta, instability)
2103 2103 if msg:
2104 2104 repo.ui.warn(msg)
2105 2105
2106 2106 if txmatch(_reportnewcssource):
2107 2107
2108 2108 @reportsummary
2109 2109 def reportnewcs(repo, tr):
2110 2110 """Report the range of new revisions pulled/unbundled."""
2111 2111 origrepolen = tr.changes.get(b'origrepolen', len(repo))
2112 2112 unfi = repo.unfiltered()
2113 2113 if origrepolen >= len(unfi):
2114 2114 return
2115 2115
2116 2116 # Compute the bounds of new visible revisions' range.
2117 2117 revs = smartset.spanset(repo, start=origrepolen)
2118 2118 if revs:
2119 2119 minrev, maxrev = repo[revs.min()], repo[revs.max()]
2120 2120
2121 2121 if minrev == maxrev:
2122 2122 revrange = minrev
2123 2123 else:
2124 2124 revrange = b'%s:%s' % (minrev, maxrev)
2125 2125 draft = len(repo.revs(b'%ld and draft()', revs))
2126 2126 secret = len(repo.revs(b'%ld and secret()', revs))
2127 2127 if not (draft or secret):
2128 2128 msg = _(b'new changesets %s\n') % revrange
2129 2129 elif draft and secret:
2130 2130 msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
2131 2131 msg %= (revrange, draft, secret)
2132 2132 elif draft:
2133 2133 msg = _(b'new changesets %s (%d drafts)\n')
2134 2134 msg %= (revrange, draft)
2135 2135 elif secret:
2136 2136 msg = _(b'new changesets %s (%d secrets)\n')
2137 2137 msg %= (revrange, secret)
2138 2138 else:
2139 2139 errormsg = b'entered unreachable condition'
2140 2140 raise error.ProgrammingError(errormsg)
2141 2141 repo.ui.status(msg)
2142 2142
2143 2143 # search new changesets directly pulled as obsolete
2144 2144 duplicates = tr.changes.get(b'revduplicates', ())
2145 2145 obsadded = unfi.revs(
2146 2146 b'(%d: + %ld) and obsolete()', origrepolen, duplicates
2147 2147 )
2148 2148 cl = repo.changelog
2149 2149 extinctadded = [r for r in obsadded if r not in cl]
2150 2150 if extinctadded:
2151 2151 # They are not just obsolete, but obsolete and invisible;
2152 2152 # we call them "extinct" internally but the term has not been
2153 2153 # exposed to users.
2154 2154 msg = b'(%d other changesets obsolete on arrival)\n'
2155 2155 repo.ui.status(msg % len(extinctadded))
2156 2156
2157 2157 @reportsummary
2158 2158 def reportphasechanges(repo, tr):
2159 2159 """Report statistics of phase changes for changesets pre-existing
2160 2160 pull/unbundle.
2161 2161 """
2162 2162 origrepolen = tr.changes.get(b'origrepolen', len(repo))
2163 2163 published = []
2164 2164 for revs, (old, new) in tr.changes.get(b'phases', []):
2165 2165 if new != phases.public:
2166 2166 continue
2167 2167 published.extend(rev for rev in revs if rev < origrepolen)
2168 2168 if not published:
2169 2169 return
2170 2170 msg = _(b'%d local changesets published\n')
2171 2171 if as_validator:
2172 2172 msg = _(b'%d local changesets will be published\n')
2173 2173 repo.ui.status(msg % len(published))
2174 2174
2175 2175
2176 2176 def getinstabilitymessage(delta, instability):
2177 2177 """function to return the message to show warning about new instabilities
2178 2178
2179 2179 exists as a separate function so that extension can wrap to show more
2180 2180 information like how to fix instabilities"""
2181 2181 if delta > 0:
2182 2182 return _(b'%i new %s changesets\n') % (delta, instability)
2183 2183
2184 2184
2185 2185 def nodesummaries(repo, nodes, maxnumnodes=4):
2186 2186 if len(nodes) <= maxnumnodes or repo.ui.verbose:
2187 2187 return b' '.join(short(h) for h in nodes)
2188 2188 first = b' '.join(short(h) for h in nodes[:maxnumnodes])
2189 2189 return _(b"%s and %d others") % (first, len(nodes) - maxnumnodes)
2190 2190
2191 2191
2192 2192 def enforcesinglehead(repo, tr, desc, accountclosed=False):
2193 2193 """check that no named branch has multiple heads"""
2194 2194 if desc in (b'strip', b'repair'):
2195 2195 # skip the logic during strip
2196 2196 return
2197 2197 visible = repo.filtered(b'visible')
2198 2198 # possible improvement: we could restrict the check to the affected branches
2199 2199 bm = visible.branchmap()
2200 2200 for name in bm:
2201 2201 heads = bm.branchheads(name, closed=accountclosed)
2202 2202 if len(heads) > 1:
2203 2203 msg = _(b'rejecting multiple heads on branch "%s"')
2204 2204 msg %= name
2205 2205 hint = _(b'%d heads: %s')
2206 2206 hint %= (len(heads), nodesummaries(repo, heads))
2207 2207 raise error.Abort(msg, hint=hint)
2208 2208
2209 2209
2210 2210 def wrapconvertsink(sink):
2211 2211 """Allow extensions to wrap the sink returned by convcmd.convertsink()
2212 2212 before it is used, whether or not the convert extension was formally loaded.
2213 2213 """
2214 2214 return sink
2215 2215
2216 2216
2217 2217 def unhidehashlikerevs(repo, specs, hiddentype):
2218 2218 """parse the user specs and unhide changesets whose hash or revision number
2219 2219 is passed.
2220 2220
2221 2221 hiddentype can be: 1) 'warn': warn while unhiding changesets
2222 2222 2) 'nowarn': don't warn while unhiding changesets
2223 2223
2224 2224 returns a repo object with the required changesets unhidden
2225 2225 """
2226 2226 if not repo.filtername or not repo.ui.configbool(
2227 2227 b'experimental', b'directaccess'
2228 2228 ):
2229 2229 return repo
2230 2230
2231 2231 if repo.filtername not in (b'visible', b'visible-hidden'):
2232 2232 return repo
2233 2233
2234 2234 symbols = set()
2235 2235 for spec in specs:
2236 2236 try:
2237 2237 tree = revsetlang.parse(spec)
2238 2238 except error.ParseError: # will be reported by scmutil.revrange()
2239 2239 continue
2240 2240
2241 2241 symbols.update(revsetlang.gethashlikesymbols(tree))
2242 2242
2243 2243 if not symbols:
2244 2244 return repo
2245 2245
2246 2246 revs = _getrevsfromsymbols(repo, symbols)
2247 2247
2248 2248 if not revs:
2249 2249 return repo
2250 2250
2251 2251 if hiddentype == b'warn':
2252 2252 unfi = repo.unfiltered()
2253 2253 revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in revs])
2254 2254 repo.ui.warn(
2255 2255 _(
2256 2256 b"warning: accessing hidden changesets for write "
2257 2257 b"operation: %s\n"
2258 2258 )
2259 2259 % revstr
2260 2260 )
2261 2261
2262 2262 # we have to use a new filtername to separate the branch/tags caches until
2263 2263 # we can disable these caches when revisions are dynamically pinned.
2264 2264 return repo.filtered(b'visible-hidden', revs)
2265 2265
2266 2266
2267 2267 def _getrevsfromsymbols(repo, symbols):
2268 2268 """parse the list of symbols and returns a set of revision numbers of hidden
2269 2269 changesets present in symbols"""
2270 2270 revs = set()
2271 2271 unfi = repo.unfiltered()
2272 2272 unficl = unfi.changelog
2273 2273 cl = repo.changelog
2274 2274 tiprev = len(unficl)
2275 2275 allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
2276 2276 for s in symbols:
2277 2277 try:
2278 2278 n = int(s)
2279 2279 if n <= tiprev:
2280 2280 if not allowrevnums:
2281 2281 continue
2282 2282 else:
2283 2283 if n not in cl:
2284 2284 revs.add(n)
2285 2285 continue
2286 2286 except ValueError:
2287 2287 pass
2288 2288
2289 2289 try:
2290 2290 s = resolvehexnodeidprefix(unfi, s)
2291 2291 except (error.LookupError, error.WdirUnsupported):
2292 2292 s = None
2293 2293
2294 2294 if s is not None:
2295 2295 rev = unficl.rev(s)
2296 2296 if rev not in cl:
2297 2297 revs.add(rev)
2298 2298
2299 2299 return revs
2300 2300
2301 2301
2302 2302 def bookmarkrevs(repo, mark):
2303 """
2304 Select revisions reachable by a given bookmark
2303 """Select revisions reachable by a given bookmark
2304
2305 If the bookmarked revision isn't a head, an empty set will be returned.
2305 2306 """
2306 2307 return repo.revs(
2307 2308 b"ancestors(bookmark(%s)) - "
2308 2309 b"ancestors(head() and not bookmark(%s)) - "
2309 2310 b"ancestors(bookmark() and not bookmark(%s))",
2310 2311 mark,
2311 2312 mark,
2312 2313 mark,
2313 2314 )
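
# A minimal usage sketch: with a bookmark "feature" on a branch head,
#
#   revs = bookmarkrevs(repo, b'feature')
#
# selects the changesets reachable from the bookmark but not from other
# heads or other bookmarks. Per the note above, if "feature" does not point
# to a head, the revset resolves to an empty set.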