addremove: use uipathfn instead of m.rel() for recorded similarity message...
Martin von Zweigbergk
r41811:0a5a6675 default
@@ -1,1855 +1,1859
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import hashlib
13 13 import os
14 14 import posixpath
15 15 import re
16 16 import subprocess
17 17 import weakref
18 18
19 19 from .i18n import _
20 20 from .node import (
21 21 bin,
22 22 hex,
23 23 nullid,
24 24 nullrev,
25 25 short,
26 26 wdirid,
27 27 wdirrev,
28 28 )
29 29
30 30 from . import (
31 31 encoding,
32 32 error,
33 33 match as matchmod,
34 34 obsolete,
35 35 obsutil,
36 36 pathutil,
37 37 phases,
38 38 policy,
39 39 pycompat,
40 40 revsetlang,
41 41 similar,
42 42 smartset,
43 43 url,
44 44 util,
45 45 vfs,
46 46 )
47 47
48 48 from .utils import (
49 49 procutil,
50 50 stringutil,
51 51 )
52 52
53 53 if pycompat.iswindows:
54 54 from . import scmwindows as scmplatform
55 55 else:
56 56 from . import scmposix as scmplatform
57 57
58 58 parsers = policy.importmod(r'parsers')
59 59
60 60 termsize = scmplatform.termsize
61 61
62 62 class status(tuple):
63 63 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
64 64 and 'ignored' properties are only relevant to the working copy.
65 65 '''
66 66
67 67 __slots__ = ()
68 68
69 69 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
70 70 clean):
71 71 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
72 72 ignored, clean))
73 73
74 74 @property
75 75 def modified(self):
76 76 '''files that have been modified'''
77 77 return self[0]
78 78
79 79 @property
80 80 def added(self):
81 81 '''files that have been added'''
82 82 return self[1]
83 83
84 84 @property
85 85 def removed(self):
86 86 '''files that have been removed'''
87 87 return self[2]
88 88
89 89 @property
90 90 def deleted(self):
91 91 '''files that are in the dirstate, but have been deleted from the
92 92 working copy (aka "missing")
93 93 '''
94 94 return self[3]
95 95
96 96 @property
97 97 def unknown(self):
98 98 '''files not in the dirstate that are not ignored'''
99 99 return self[4]
100 100
101 101 @property
102 102 def ignored(self):
103 103 '''files not in the dirstate that are ignored (by _dirignore())'''
104 104 return self[5]
105 105
106 106 @property
107 107 def clean(self):
108 108 '''files that have not been modified'''
109 109 return self[6]
110 110
111 111 def __repr__(self, *args, **kwargs):
112 112 return ((r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
113 113 r'unknown=%s, ignored=%s, clean=%s>') %
114 114 tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self))
115 115
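A minimal usage sketch of the tuple above (the file names are hypothetical):

```python
from mercurial import scmutil

st = scmutil.status([b'changed.txt'], [b'new.txt'], [], [], [], [], [])
assert st.modified == [b'changed.txt']  # position 0
assert st.added == [b'new.txt']         # position 1
# as a plain tuple subclass it also unpacks positionally
modified, added, removed, deleted, unknown, ignored, clean = st
```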
116 116 def itersubrepos(ctx1, ctx2):
117 117 """find subrepos in ctx1 or ctx2"""
118 118 # Create a (subpath, ctx) mapping where we prefer subpaths from
119 119 # ctx1. The subpaths from ctx2 are important when the .hgsub file
120 120 # has been modified (in ctx2) but not yet committed (in ctx1).
121 121 subpaths = dict.fromkeys(ctx2.substate, ctx2)
122 122 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
123 123
124 124 missing = set()
125 125
126 126 for subpath in ctx2.substate:
127 127 if subpath not in ctx1.substate:
128 128 del subpaths[subpath]
129 129 missing.add(subpath)
130 130
131 131 for subpath, ctx in sorted(subpaths.iteritems()):
132 132 yield subpath, ctx.sub(subpath)
133 133
134 134 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
135 135 # status and diff will have an accurate result when it does
136 136 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
137 137 # against itself.
138 138 for subpath in missing:
139 139 yield subpath, ctx2.nullsub(subpath, ctx1)
140 140
141 141 def nochangesfound(ui, repo, excluded=None):
142 142 '''Report no changes for push/pull, excluded is None or a list of
143 143 nodes excluded from the push/pull.
144 144 '''
145 145 secretlist = []
146 146 if excluded:
147 147 for n in excluded:
148 148 ctx = repo[n]
149 149 if ctx.phase() >= phases.secret and not ctx.extinct():
150 150 secretlist.append(n)
151 151
152 152 if secretlist:
153 153 ui.status(_("no changes found (ignored %d secret changesets)\n")
154 154 % len(secretlist))
155 155 else:
156 156 ui.status(_("no changes found\n"))
157 157
158 158 def callcatch(ui, func):
159 159 """call func() with global exception handling
160 160
161 161 return func() if no exception happens. otherwise do some error handling
162 162 and return an exit code accordingly. does not handle all exceptions.
163 163 """
164 164 try:
165 165 try:
166 166 return func()
167 167 except: # re-raises
168 168 ui.traceback()
169 169 raise
170 170 # Global exception handling, alphabetically
171 171 # Mercurial-specific first, followed by built-in and library exceptions
172 172 except error.LockHeld as inst:
173 173 if inst.errno == errno.ETIMEDOUT:
174 174 reason = _('timed out waiting for lock held by %r') % (
175 175 pycompat.bytestr(inst.locker))
176 176 else:
177 177 reason = _('lock held by %r') % inst.locker
178 178 ui.error(_("abort: %s: %s\n") % (
179 179 inst.desc or stringutil.forcebytestr(inst.filename), reason))
180 180 if not inst.locker:
181 181 ui.error(_("(lock might be very busy)\n"))
182 182 except error.LockUnavailable as inst:
183 183 ui.error(_("abort: could not lock %s: %s\n") %
184 184 (inst.desc or stringutil.forcebytestr(inst.filename),
185 185 encoding.strtolocal(inst.strerror)))
186 186 except error.OutOfBandError as inst:
187 187 if inst.args:
188 188 msg = _("abort: remote error:\n")
189 189 else:
190 190 msg = _("abort: remote error\n")
191 191 ui.error(msg)
192 192 if inst.args:
193 193 ui.error(''.join(inst.args))
194 194 if inst.hint:
195 195 ui.error('(%s)\n' % inst.hint)
196 196 except error.RepoError as inst:
197 197 ui.error(_("abort: %s!\n") % inst)
198 198 if inst.hint:
199 199 ui.error(_("(%s)\n") % inst.hint)
200 200 except error.ResponseError as inst:
201 201 ui.error(_("abort: %s") % inst.args[0])
202 202 msg = inst.args[1]
203 203 if isinstance(msg, type(u'')):
204 204 msg = pycompat.sysbytes(msg)
205 205 if not isinstance(msg, bytes):
206 206 ui.error(" %r\n" % (msg,))
207 207 elif not msg:
208 208 ui.error(_(" empty string\n"))
209 209 else:
210 210 ui.error("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
211 211 except error.CensoredNodeError as inst:
212 212 ui.error(_("abort: file censored %s!\n") % inst)
213 213 except error.StorageError as inst:
214 214 ui.error(_("abort: %s!\n") % inst)
215 215 if inst.hint:
216 216 ui.error(_("(%s)\n") % inst.hint)
217 217 except error.InterventionRequired as inst:
218 218 ui.error("%s\n" % inst)
219 219 if inst.hint:
220 220 ui.error(_("(%s)\n") % inst.hint)
221 221 return 1
222 222 except error.WdirUnsupported:
223 223 ui.error(_("abort: working directory revision cannot be specified\n"))
224 224 except error.Abort as inst:
225 225 ui.error(_("abort: %s\n") % inst)
226 226 if inst.hint:
227 227 ui.error(_("(%s)\n") % inst.hint)
228 228 except ImportError as inst:
229 229 ui.error(_("abort: %s!\n") % stringutil.forcebytestr(inst))
230 230 m = stringutil.forcebytestr(inst).split()[-1]
231 231 if m in "mpatch bdiff".split():
232 232 ui.error(_("(did you forget to compile extensions?)\n"))
233 233 elif m in "zlib".split():
234 234 ui.error(_("(is your Python install correct?)\n"))
235 235 except (IOError, OSError) as inst:
236 236 if util.safehasattr(inst, "code"): # HTTPError
237 237 ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst))
238 238 elif util.safehasattr(inst, "reason"): # URLError or SSLError
239 239 try: # usually it is in the form (errno, strerror)
240 240 reason = inst.reason.args[1]
241 241 except (AttributeError, IndexError):
242 242 # it might be anything, for example a string
243 243 reason = inst.reason
244 244 if isinstance(reason, pycompat.unicode):
245 245 # SSLError of Python 2.7.9 contains a unicode
246 246 reason = encoding.unitolocal(reason)
247 247 ui.error(_("abort: error: %s\n") % reason)
248 248 elif (util.safehasattr(inst, "args")
249 249 and inst.args and inst.args[0] == errno.EPIPE):
250 250 pass
251 251 elif getattr(inst, "strerror", None): # common IOError or OSError
252 252 if getattr(inst, "filename", None) is not None:
253 253 ui.error(_("abort: %s: '%s'\n") % (
254 254 encoding.strtolocal(inst.strerror),
255 255 stringutil.forcebytestr(inst.filename)))
256 256 else:
257 257 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
258 258 else: # suspicious IOError
259 259 raise
260 260 except MemoryError:
261 261 ui.error(_("abort: out of memory\n"))
262 262 except SystemExit as inst:
263 263 # Commands shouldn't sys.exit directly, but give a return code.
264 264 # Just in case catch this and and pass exit code to caller.
265 265 return inst.code
266 266
267 267 return -1
268 268
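Roughly how a caller uses this (a sketch; `run` is a hypothetical command body, and the command dispatcher does something similar):

```python
from mercurial import scmutil, ui as uimod

def run():
    return 0  # may raise any of the errors handled above

u = uimod.ui.load()
exitcode = scmutil.callcatch(u, run)  # run()'s result, or -1/1 on error
```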
269 269 def checknewlabel(repo, lbl, kind):
270 270 # Do not use the "kind" parameter in ui output.
271 271 # It makes strings difficult to translate.
272 272 if lbl in ['tip', '.', 'null']:
273 273 raise error.Abort(_("the name '%s' is reserved") % lbl)
274 274 for c in (':', '\0', '\n', '\r'):
275 275 if c in lbl:
276 276 raise error.Abort(
277 277 _("%r cannot be used in a name") % pycompat.bytestr(c))
278 278 try:
279 279 int(lbl)
280 280 raise error.Abort(_("cannot use an integer as a name"))
281 281 except ValueError:
282 282 pass
283 283 if lbl.strip() != lbl:
284 284 raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
285 285
286 286 def checkfilename(f):
287 287 '''Check that the filename f is an acceptable filename for a tracked file'''
288 288 if '\r' in f or '\n' in f:
289 289 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
290 290 % pycompat.bytestr(f))
291 291
292 292 def checkportable(ui, f):
293 293 '''Check if filename f is portable and warn or abort depending on config'''
294 294 checkfilename(f)
295 295 abort, warn = checkportabilityalert(ui)
296 296 if abort or warn:
297 297 msg = util.checkwinfilename(f)
298 298 if msg:
299 299 msg = "%s: %s" % (msg, procutil.shellquote(f))
300 300 if abort:
301 301 raise error.Abort(msg)
302 302 ui.warn(_("warning: %s\n") % msg)
303 303
304 304 def checkportabilityalert(ui):
305 305 '''check if the user's config requests nothing, a warning, or abort for
306 306 non-portable filenames'''
307 307 val = ui.config('ui', 'portablefilenames')
308 308 lval = val.lower()
309 309 bval = stringutil.parsebool(val)
310 310 abort = pycompat.iswindows or lval == 'abort'
311 311 warn = bval or lval == 'warn'
312 312 if bval is None and not (warn or abort or lval == 'ignore'):
313 313 raise error.ConfigError(
314 314 _("ui.portablefilenames value is invalid ('%s')") % val)
315 315 return abort, warn
316 316
317 317 class casecollisionauditor(object):
318 318 def __init__(self, ui, abort, dirstate):
319 319 self._ui = ui
320 320 self._abort = abort
321 321 allfiles = '\0'.join(dirstate._map)
322 322 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
323 323 self._dirstate = dirstate
324 324 # The purpose of _newfiles is so that we don't complain about
325 325 # case collisions if someone were to call this object with the
326 326 # same filename twice.
327 327 self._newfiles = set()
328 328
329 329 def __call__(self, f):
330 330 if f in self._newfiles:
331 331 return
332 332 fl = encoding.lower(f)
333 333 if fl in self._loweredfiles and f not in self._dirstate:
334 334 msg = _('possible case-folding collision for %s') % f
335 335 if self._abort:
336 336 raise error.Abort(msg)
337 337 self._ui.warn(_("warning: %s\n") % msg)
338 338 self._loweredfiles.add(fl)
339 339 self._newfiles.add(f)
340 340
341 341 def filteredhash(repo, maxrev):
342 342 """build hash of filtered revisions in the current repoview.
343 343
344 344 Multiple caches perform up-to-date validation by checking that the
345 345 tiprev and tipnode stored in the cache file match the current repository.
346 346 However, this is not sufficient for validating repoviews because the set
347 347 of revisions in the view may change without the repository tiprev and
348 348 tipnode changing.
349 349
350 350 This function hashes all the revs filtered from the view and returns
351 351 that SHA-1 digest.
352 352 """
353 353 cl = repo.changelog
354 354 if not cl.filteredrevs:
355 355 return None
356 356 key = None
357 357 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
358 358 if revs:
359 359 s = hashlib.sha1()
360 360 for rev in revs:
361 361 s.update('%d;' % rev)
362 362 key = s.digest()
363 363 return key
364 364
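The key derivation is easy to reproduce standalone; a sketch assuming only a set of filtered revision numbers:

```python
import hashlib

def filteredkey(filteredrevs, maxrev):
    # same scheme as filteredhash() above: SHA-1 over '<rev>;' for
    # every filtered rev <= maxrev, or None when nothing is filtered
    revs = sorted(r for r in filteredrevs if r <= maxrev)
    if not revs:
        return None
    s = hashlib.sha1()
    for rev in revs:
        s.update(('%d;' % rev).encode('ascii'))
    return s.digest()

assert filteredkey({5, 9}, 4) is None                     # nothing filtered yet
assert filteredkey({5, 9}, 9) == filteredkey({9, 5}, 99)  # order-insensitive
```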
365 365 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
366 366 '''yield every hg repository under path, always recursively.
367 367 The recurse flag will only control recursion into repo working dirs'''
368 368 def errhandler(err):
369 369 if err.filename == path:
370 370 raise err
371 371 samestat = getattr(os.path, 'samestat', None)
372 372 if followsym and samestat is not None:
373 373 def adddir(dirlst, dirname):
374 374 dirstat = os.stat(dirname)
375 375 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
376 376 if not match:
377 377 dirlst.append(dirstat)
378 378 return not match
379 379 else:
380 380 followsym = False
381 381
382 382 if (seen_dirs is None) and followsym:
383 383 seen_dirs = []
384 384 adddir(seen_dirs, path)
385 385 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
386 386 dirs.sort()
387 387 if '.hg' in dirs:
388 388 yield root # found a repository
389 389 qroot = os.path.join(root, '.hg', 'patches')
390 390 if os.path.isdir(os.path.join(qroot, '.hg')):
391 391 yield qroot # we have a patch queue repo here
392 392 if recurse:
393 393 # avoid recursing inside the .hg directory
394 394 dirs.remove('.hg')
395 395 else:
396 396 dirs[:] = [] # don't descend further
397 397 elif followsym:
398 398 newdirs = []
399 399 for d in dirs:
400 400 fname = os.path.join(root, d)
401 401 if adddir(seen_dirs, fname):
402 402 if os.path.islink(fname):
403 403 for hgname in walkrepos(fname, True, seen_dirs):
404 404 yield hgname
405 405 else:
406 406 newdirs.append(d)
407 407 dirs[:] = newdirs
408 408
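Typical use, with a hypothetical directory of checkouts:

```python
from mercurial import scmutil

# yields every directory containing a .hg, plus any mq patch-queue
# repos (.hg/patches/.hg) beneath them
for repopath in scmutil.walkrepos(b'/srv/checkouts', followsym=True):
    print(repopath)
```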
409 409 def binnode(ctx):
410 410 """Return binary node id for a given basectx"""
411 411 node = ctx.node()
412 412 if node is None:
413 413 return wdirid
414 414 return node
415 415
416 416 def intrev(ctx):
417 417 """Return integer for a given basectx that can be used in comparison or
418 418 arithmetic operation"""
419 419 rev = ctx.rev()
420 420 if rev is None:
421 421 return wdirrev
422 422 return rev
423 423
424 424 def formatchangeid(ctx):
425 425 """Format changectx as '{rev}:{node|formatnode}', which is the default
426 426 template provided by logcmdutil.changesettemplater"""
427 427 repo = ctx.repo()
428 428 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
429 429
430 430 def formatrevnode(ui, rev, node):
431 431 """Format given revision and node depending on the current verbosity"""
432 432 if ui.debugflag:
433 433 hexfunc = hex
434 434 else:
435 435 hexfunc = short
436 436 return '%d:%s' % (rev, hexfunc(node))
437 437
438 438 def resolvehexnodeidprefix(repo, prefix):
439 439 if (prefix.startswith('x') and
440 440 repo.ui.configbool('experimental', 'revisions.prefixhexnode')):
441 441 prefix = prefix[1:]
442 442 try:
443 443 # Uses unfiltered repo because it's faster when prefix is ambiguous.
444 444 # This matches the shortesthexnodeidprefix() function below.
445 445 node = repo.unfiltered().changelog._partialmatch(prefix)
446 446 except error.AmbiguousPrefixLookupError:
447 447 revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
448 448 if revset:
449 449 # Clear config to avoid infinite recursion
450 450 configoverrides = {('experimental',
451 451 'revisions.disambiguatewithin'): None}
452 452 with repo.ui.configoverride(configoverrides):
453 453 revs = repo.anyrevs([revset], user=True)
454 454 matches = []
455 455 for rev in revs:
456 456 node = repo.changelog.node(rev)
457 457 if hex(node).startswith(prefix):
458 458 matches.append(node)
459 459 if len(matches) == 1:
460 460 return matches[0]
461 461 raise
462 462 if node is None:
463 463 return
464 464 repo.changelog.rev(node) # make sure node isn't filtered
465 465 return node
466 466
467 467 def mayberevnum(repo, prefix):
468 468 """Checks if the given prefix may be mistaken for a revision number"""
469 469 try:
470 470 i = int(prefix)
471 471 # if we are a pure int, then starting with zero will not be
472 472 # confused as a rev; or, obviously, if the int is larger
473 473 # than the value of the tip rev. We still need to disambiguate if
474 474 # prefix == '0', since that *is* a valid revnum.
475 475 if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
476 476 return False
477 477 return True
478 478 except ValueError:
479 479 return False
480 480
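For a hypothetical `repo` with about 1200 revisions, those rules work out as:

```python
mayberevnum(repo, b'42')     # True: a plausible revision number
mayberevnum(repo, b'0')      # True: '0' is a valid revnum
mayberevnum(repo, b'042')    # False: a leading zero is never a revnum
mayberevnum(repo, b'99999')  # False: larger than the tip rev
mayberevnum(repo, b'cafe')   # False: not an integer at all
```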
481 481 def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
482 482 """Find the shortest unambiguous prefix that matches hexnode.
483 483
484 484 If "cache" is not None, it must be a dictionary that can be used for
485 485 caching between calls to this method.
486 486 """
487 487 # _partialmatch() of filtered changelog could take O(len(repo)) time,
488 488 # which would be unacceptably slow, so we look for hash collisions in
489 489 # unfiltered space, which means some hashes may be slightly longer.
490 490
491 491 minlength = max(minlength, 1)
492 492
493 493 def disambiguate(prefix):
494 494 """Disambiguate against revnums."""
495 495 if repo.ui.configbool('experimental', 'revisions.prefixhexnode'):
496 496 if mayberevnum(repo, prefix):
497 497 return 'x' + prefix
498 498 else:
499 499 return prefix
500 500
501 501 hexnode = hex(node)
502 502 for length in range(len(prefix), len(hexnode) + 1):
503 503 prefix = hexnode[:length]
504 504 if not mayberevnum(repo, prefix):
505 505 return prefix
506 506
507 507 cl = repo.unfiltered().changelog
508 508 revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
509 509 if revset:
510 510 revs = None
511 511 if cache is not None:
512 512 revs = cache.get('disambiguationrevset')
513 513 if revs is None:
514 514 revs = repo.anyrevs([revset], user=True)
515 515 if cache is not None:
516 516 cache['disambiguationrevset'] = revs
517 517 if cl.rev(node) in revs:
518 518 hexnode = hex(node)
519 519 nodetree = None
520 520 if cache is not None:
521 521 nodetree = cache.get('disambiguationnodetree')
522 522 if not nodetree:
523 523 try:
524 524 nodetree = parsers.nodetree(cl.index, len(revs))
525 525 except AttributeError:
526 526 # no native nodetree
527 527 pass
528 528 else:
529 529 for r in revs:
530 530 nodetree.insert(r)
531 531 if cache is not None:
532 532 cache['disambiguationnodetree'] = nodetree
533 533 if nodetree is not None:
534 534 length = max(nodetree.shortest(node), minlength)
535 535 prefix = hexnode[:length]
536 536 return disambiguate(prefix)
537 537 for length in range(minlength, len(hexnode) + 1):
538 538 matches = []
539 539 prefix = hexnode[:length]
540 540 for rev in revs:
541 541 otherhexnode = repo[rev].hex()
542 542 if prefix == otherhexnode[:length]:
543 543 matches.append(otherhexnode)
544 544 if len(matches) == 1:
545 545 return disambiguate(prefix)
546 546
547 547 try:
548 548 return disambiguate(cl.shortest(node, minlength))
549 549 except error.LookupError:
550 550 raise error.RepoLookupError()
551 551
552 552 def isrevsymbol(repo, symbol):
553 553 """Checks if a symbol exists in the repo.
554 554
555 555 See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
556 556 symbol is an ambiguous nodeid prefix.
557 557 """
558 558 try:
559 559 revsymbol(repo, symbol)
560 560 return True
561 561 except error.RepoLookupError:
562 562 return False
563 563
564 564 def revsymbol(repo, symbol):
565 565 """Returns a context given a single revision symbol (as string).
566 566
567 567 This is similar to revsingle(), but accepts only a single revision symbol,
568 568 i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
569 569 not "max(public())".
570 570 """
571 571 if not isinstance(symbol, bytes):
572 572 msg = ("symbol (%s of type %s) was not a string, did you mean "
573 573 "repo[symbol]?" % (symbol, type(symbol)))
574 574 raise error.ProgrammingError(msg)
575 575 try:
576 576 if symbol in ('.', 'tip', 'null'):
577 577 return repo[symbol]
578 578
579 579 try:
580 580 r = int(symbol)
581 581 if '%d' % r != symbol:
582 582 raise ValueError
583 583 l = len(repo.changelog)
584 584 if r < 0:
585 585 r += l
586 586 if r < 0 or r >= l and r != wdirrev:
587 587 raise ValueError
588 588 return repo[r]
589 589 except error.FilteredIndexError:
590 590 raise
591 591 except (ValueError, OverflowError, IndexError):
592 592 pass
593 593
594 594 if len(symbol) == 40:
595 595 try:
596 596 node = bin(symbol)
597 597 rev = repo.changelog.rev(node)
598 598 return repo[rev]
599 599 except error.FilteredLookupError:
600 600 raise
601 601 except (TypeError, LookupError):
602 602 pass
603 603
604 604 # look up bookmarks through the name interface
605 605 try:
606 606 node = repo.names.singlenode(repo, symbol)
607 607 rev = repo.changelog.rev(node)
608 608 return repo[rev]
609 609 except KeyError:
610 610 pass
611 611
612 612 node = resolvehexnodeidprefix(repo, symbol)
613 613 if node is not None:
614 614 rev = repo.changelog.rev(node)
615 615 return repo[rev]
616 616
617 617 raise error.RepoLookupError(_("unknown revision '%s'") % symbol)
618 618
619 619 except error.WdirUnsupported:
620 620 return repo[None]
621 621 except (error.FilteredIndexError, error.FilteredLookupError,
622 622 error.FilteredRepoLookupError):
623 623 raise _filterederror(repo, symbol)
624 624
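Usage sketch (`my-bookmark` is a hypothetical bookmark name):

```python
ctx = scmutil.revsymbol(repo, b'tip')  # single symbols only
if scmutil.isrevsymbol(repo, b'my-bookmark'):
    ctx = scmutil.revsymbol(repo, b'my-bookmark')
# full revsets such as b'max(public())' must instead go through
# revsingle()/revrange() below
```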
625 625 def _filterederror(repo, changeid):
626 626 """build an exception to be raised about a filtered changeid
627 627
628 628 This is extracted in a function to help extensions (eg: evolve) to
629 629 experiment with various message variants."""
630 630 if repo.filtername.startswith('visible'):
631 631
632 632 # Check if the changeset is obsolete
633 633 unfilteredrepo = repo.unfiltered()
634 634 ctx = revsymbol(unfilteredrepo, changeid)
635 635
636 636 # If the changeset is obsolete, enrich the message with the reason
637 637 # that made this changeset not visible
638 638 if ctx.obsolete():
639 639 msg = obsutil._getfilteredreason(repo, changeid, ctx)
640 640 else:
641 641 msg = _("hidden revision '%s'") % changeid
642 642
643 643 hint = _('use --hidden to access hidden revisions')
644 644
645 645 return error.FilteredRepoLookupError(msg, hint=hint)
646 646 msg = _("filtered revision '%s' (not in '%s' subset)")
647 647 msg %= (changeid, repo.filtername)
648 648 return error.FilteredRepoLookupError(msg)
649 649
650 650 def revsingle(repo, revspec, default='.', localalias=None):
651 651 if not revspec and revspec != 0:
652 652 return repo[default]
653 653
654 654 l = revrange(repo, [revspec], localalias=localalias)
655 655 if not l:
656 656 raise error.Abort(_('empty revision set'))
657 657 return repo[l.last()]
658 658
659 659 def _pairspec(revspec):
660 660 tree = revsetlang.parse(revspec)
661 661 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
662 662
663 663 def revpair(repo, revs):
664 664 if not revs:
665 665 return repo['.'], repo[None]
666 666
667 667 l = revrange(repo, revs)
668 668
669 669 if not l:
670 670 raise error.Abort(_('empty revision range'))
671 671
672 672 first = l.first()
673 673 second = l.last()
674 674
675 675 if (first == second and len(revs) >= 2
676 676 and not all(revrange(repo, [r]) for r in revs)):
677 677 raise error.Abort(_('empty revision on one side of range'))
678 678
679 679 # if top-level is range expression, the result must always be a pair
680 680 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
681 681 return repo[first], repo[None]
682 682
683 683 return repo[first], repo[second]
684 684
685 685 def revrange(repo, specs, localalias=None):
686 686 """Execute 1 to many revsets and return the union.
687 687
688 688 This is the preferred mechanism for executing revsets using user-specified
689 689 config options, such as revset aliases.
690 690
691 691 The revsets specified by ``specs`` will be executed via a chained ``OR``
692 692 expression. If ``specs`` is empty, an empty result is returned.
693 693
694 694 ``specs`` can contain integers, in which case they are assumed to be
695 695 revision numbers.
696 696
697 697 It is assumed the revsets are already formatted. If you have arguments
698 698 that need to be expanded in the revset, call ``revsetlang.formatspec()``
699 699 and pass the result as an element of ``specs``.
700 700
701 701 Specifying a single revset is allowed.
702 702
703 703 Returns a ``revset.abstractsmartset`` which is a list-like interface over
704 704 integer revisions.
705 705 """
706 706 allspecs = []
707 707 for spec in specs:
708 708 if isinstance(spec, int):
709 709 spec = revsetlang.formatspec('%d', spec)
710 710 allspecs.append(spec)
711 711 return repo.anyrevs(allspecs, user=True, localalias=localalias)
712 712
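For example (a sketch; the specs are arbitrary):

```python
revs = scmutil.revrange(repo, [b'0:2', b'tip'])  # union of both specs
if revs:
    lo, hi = revs.first(), revs.last()  # a smartset, not a plain list
ctx = scmutil.revsingle(repo, b'.')     # the single-revision variant above
```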
713 713 def meaningfulparents(repo, ctx):
714 714 """Return list of meaningful (or all if debug) parentrevs for rev.
715 715
716 716 For merges (two non-nullrev revisions) both parents are meaningful.
717 717 Otherwise the first parent revision is considered meaningful if it
718 718 is not the preceding revision.
719 719 """
720 720 parents = ctx.parents()
721 721 if len(parents) > 1:
722 722 return parents
723 723 if repo.ui.debugflag:
724 724 return [parents[0], repo[nullrev]]
725 725 if parents[0].rev() >= intrev(ctx) - 1:
726 726 return []
727 727 return parents
728 728
729 729 def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
730 730 """Return a function that produces paths for presenting to the user.
731 731
732 732 The returned function takes a repo-relative path and produces a path
733 733 that can be presented in the UI.
734 734
735 735 Depending on the value of ui.relative-paths, either a repo-relative or
736 736 cwd-relative path will be produced.
737 737
738 738 legacyrelativevalue is the value to use if ui.relative-paths=legacy
739 739
740 740 If forcerelativevalue is not None, then that value will be used regardless
741 741 of what ui.relative-paths is set to.
742 742 """
743 743 if forcerelativevalue is not None:
744 744 relative = forcerelativevalue
745 745 else:
746 746 config = repo.ui.config('ui', 'relative-paths')
747 747 if config == 'legacy':
748 748 relative = legacyrelativevalue
749 749 else:
750 750 relative = stringutil.parsebool(config)
751 751 if relative is None:
752 752 raise error.ConfigError(
753 753 _("ui.relative-paths is not a boolean ('%s')") % config)
754 754
755 755 if relative:
756 756 cwd = repo.getcwd()
757 757 pathto = repo.pathto
758 758 return lambda f: pathto(f, cwd)
759 759 else:
760 760 return lambda f: f
761 761
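A sketch of the resulting behavior, assuming the current directory is `<repo>/subdir`:

```python
uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
# with ui.relative-paths=yes (cwd == <repo>/subdir):
#   uipathfn(b'subdir/file.txt') -> b'file.txt'
# with ui.relative-paths=no:
#   uipathfn(b'subdir/file.txt') -> b'subdir/file.txt'
# subdiruipathfn() below wraps this for paths inside a subrepo
```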
762 762 def subdiruipathfn(subpath, uipathfn):
763 763 '''Create a new uipathfn that treats the file as relative to subpath.'''
764 764 return lambda f: uipathfn(posixpath.join(subpath, f))
765 765
766 766 def anypats(pats, opts):
767 767 '''Checks if any patterns, including --include and --exclude were given.
768 768
769 769 Some commands (e.g. addremove) use this condition for deciding whether to
770 770 print absolute or relative paths.
771 771 '''
772 772 return bool(pats or opts.get('include') or opts.get('exclude'))
773 773
774 774 def expandpats(pats):
775 775 '''Expand bare globs when running on windows.
776 776 On posix we assume it has already been done by sh.'''
777 777 if not util.expandglobs:
778 778 return list(pats)
779 779 ret = []
780 780 for kindpat in pats:
781 781 kind, pat = matchmod._patsplit(kindpat, None)
782 782 if kind is None:
783 783 try:
784 784 globbed = glob.glob(pat)
785 785 except re.error:
786 786 globbed = [pat]
787 787 if globbed:
788 788 ret.extend(globbed)
789 789 continue
790 790 ret.append(kindpat)
791 791 return ret
792 792
793 793 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
794 794 badfn=None):
795 795 '''Return a matcher and the patterns that were used.
796 796 The matcher will warn about bad matches, unless an alternate badfn callback
797 797 is provided.'''
798 798 if pats == ("",):
799 799 pats = []
800 800 if opts is None:
801 801 opts = {}
802 802 if not globbed and default == 'relpath':
803 803 pats = expandpats(pats or [])
804 804
805 805 def bad(f, msg):
806 806 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
807 807
808 808 if badfn is None:
809 809 badfn = bad
810 810
811 811 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
812 812 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
813 813
814 814 if m.always():
815 815 pats = []
816 816 return m, pats
817 817
818 818 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
819 819 badfn=None):
820 820 '''Return a matcher that will warn about bad matches.'''
821 821 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
822 822
823 823 def matchall(repo):
824 824 '''Return a matcher that will efficiently match everything.'''
825 825 return matchmod.always(repo.root, repo.getcwd())
826 826
827 827 def matchfiles(repo, files, badfn=None):
828 828 '''Return a matcher that will efficiently match exactly these files.'''
829 829 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
830 830
831 831 def parsefollowlinespattern(repo, rev, pat, msg):
832 832 """Return a file name from `pat` pattern suitable for usage in followlines
833 833 logic.
834 834 """
835 835 if not matchmod.patkind(pat):
836 836 return pathutil.canonpath(repo.root, repo.getcwd(), pat)
837 837 else:
838 838 ctx = repo[rev]
839 839 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
840 840 files = [f for f in ctx if m(f)]
841 841 if len(files) != 1:
842 842 raise error.ParseError(msg)
843 843 return files[0]
844 844
845 845 def getorigvfs(ui, repo):
846 846 """return a vfs suitable to save 'orig' file
847 847
848 848 return None if no special directory is configured"""
849 849 origbackuppath = ui.config('ui', 'origbackuppath')
850 850 if not origbackuppath:
851 851 return None
852 852 return vfs.vfs(repo.wvfs.join(origbackuppath))
853 853
854 854 def backuppath(ui, repo, filepath):
855 855 '''customize where working copy backup files (.orig files) are created
856 856
857 857 Fetch user defined path from config file: [ui] origbackuppath = <path>
858 858 Fall back to default (filepath with .orig suffix) if not specified
859 859
860 860 filepath is repo-relative
861 861
862 862 Returns an absolute path
863 863 '''
864 864 origvfs = getorigvfs(ui, repo)
865 865 if origvfs is None:
866 866 return repo.wjoin(filepath + ".orig")
867 867
868 868 origbackupdir = origvfs.dirname(filepath)
869 869 if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
870 870 ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))
871 871
872 872 # Remove any files that conflict with the backup file's path
873 873 for f in reversed(list(util.finddirs(filepath))):
874 874 if origvfs.isfileorlink(f):
875 875 ui.note(_('removing conflicting file: %s\n')
876 876 % origvfs.join(f))
877 877 origvfs.unlink(f)
878 878 break
879 879
880 880 origvfs.makedirs(origbackupdir)
881 881
882 882 if origvfs.isdir(filepath) and not origvfs.islink(filepath):
883 883 ui.note(_('removing conflicting directory: %s\n')
884 884 % origvfs.join(filepath))
885 885 origvfs.rmtree(filepath, forcibly=True)
886 886
887 887 return origvfs.join(filepath)
888 888
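The two configurations play out like this (paths hypothetical):

```python
target = scmutil.backuppath(ui, repo, b'foo/bar.txt')
# [ui] origbackuppath unset:
#   target == <repo>/foo/bar.txt.orig
# [ui] origbackuppath = .hg/origbackups:
#   target == <repo>/.hg/origbackups/foo/bar.txt
# conflicting files or directories along the target are removed first
```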
889 889 class _containsnode(object):
890 890 """proxy __contains__(node) to container.__contains__ which accepts revs"""
891 891
892 892 def __init__(self, repo, revcontainer):
893 893 self._torev = repo.changelog.rev
894 894 self._revcontains = revcontainer.__contains__
895 895
896 896 def __contains__(self, node):
897 897 return self._revcontains(self._torev(node))
898 898
899 899 def cleanupnodes(repo, replacements, operation, moves=None, metadata=None,
900 900 fixphase=False, targetphase=None, backup=True):
901 901 """do common cleanups when old nodes are replaced by new nodes
902 902
903 903 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
904 904 (we might also want to move working directory parent in the future)
905 905
906 906 By default, bookmark moves are calculated automatically from 'replacements',
907 907 but 'moves' can be used to override that. Also, 'moves' may include
908 908 additional bookmark moves that should not have associated obsmarkers.
909 909
910 910 replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
911 911 have replacements. operation is a string, like "rebase".
912 912
913 913 metadata is a dictionary containing metadata to be stored in obsmarker if
914 914 obsolescence is enabled.
915 915 """
916 916 assert fixphase or targetphase is None
917 917 if not replacements and not moves:
918 918 return
919 919
920 920 # translate mapping's other forms
921 921 if not util.safehasattr(replacements, 'items'):
922 922 replacements = {(n,): () for n in replacements}
923 923 else:
924 924 # upgrading non-tuple "source" keys to tuples for BC
925 925 repls = {}
926 926 for key, value in replacements.items():
927 927 if not isinstance(key, tuple):
928 928 key = (key,)
929 929 repls[key] = value
930 930 replacements = repls
931 931
932 932 # Unfiltered repo is needed since nodes in replacements might be hidden.
933 933 unfi = repo.unfiltered()
934 934
935 935 # Calculate bookmark movements
936 936 if moves is None:
937 937 moves = {}
938 938 for oldnodes, newnodes in replacements.items():
939 939 for oldnode in oldnodes:
940 940 if oldnode in moves:
941 941 continue
942 942 if len(newnodes) > 1:
943 943 # usually a split, take the one with biggest rev number
944 944 newnode = next(unfi.set('max(%ln)', newnodes)).node()
945 945 elif len(newnodes) == 0:
946 946 # move bookmark backwards
947 947 allreplaced = []
948 948 for rep in replacements:
949 949 allreplaced.extend(rep)
950 950 roots = list(unfi.set('max((::%n) - %ln)', oldnode,
951 951 allreplaced))
952 952 if roots:
953 953 newnode = roots[0].node()
954 954 else:
955 955 newnode = nullid
956 956 else:
957 957 newnode = newnodes[0]
958 958 moves[oldnode] = newnode
959 959
960 960 allnewnodes = [n for ns in replacements.values() for n in ns]
961 961 toretract = {}
962 962 toadvance = {}
963 963 if fixphase:
964 964 precursors = {}
965 965 for oldnodes, newnodes in replacements.items():
966 966 for oldnode in oldnodes:
967 967 for newnode in newnodes:
968 968 precursors.setdefault(newnode, []).append(oldnode)
969 969
970 970 allnewnodes.sort(key=lambda n: unfi[n].rev())
971 971 newphases = {}
972 972 def phase(ctx):
973 973 return newphases.get(ctx.node(), ctx.phase())
974 974 for newnode in allnewnodes:
975 975 ctx = unfi[newnode]
976 976 parentphase = max(phase(p) for p in ctx.parents())
977 977 if targetphase is None:
978 978 oldphase = max(unfi[oldnode].phase()
979 979 for oldnode in precursors[newnode])
980 980 newphase = max(oldphase, parentphase)
981 981 else:
982 982 newphase = max(targetphase, parentphase)
983 983 newphases[newnode] = newphase
984 984 if newphase > ctx.phase():
985 985 toretract.setdefault(newphase, []).append(newnode)
986 986 elif newphase < ctx.phase():
987 987 toadvance.setdefault(newphase, []).append(newnode)
988 988
989 989 with repo.transaction('cleanup') as tr:
990 990 # Move bookmarks
991 991 bmarks = repo._bookmarks
992 992 bmarkchanges = []
993 993 for oldnode, newnode in moves.items():
994 994 oldbmarks = repo.nodebookmarks(oldnode)
995 995 if not oldbmarks:
996 996 continue
997 997 from . import bookmarks # avoid import cycle
998 998 repo.ui.debug('moving bookmarks %r from %s to %s\n' %
999 999 (pycompat.rapply(pycompat.maybebytestr, oldbmarks),
1000 1000 hex(oldnode), hex(newnode)))
1001 1001 # Delete divergent bookmarks being parents of related newnodes
1002 1002 deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
1003 1003 allnewnodes, newnode, oldnode)
1004 1004 deletenodes = _containsnode(repo, deleterevs)
1005 1005 for name in oldbmarks:
1006 1006 bmarkchanges.append((name, newnode))
1007 1007 for b in bookmarks.divergent2delete(repo, deletenodes, name):
1008 1008 bmarkchanges.append((b, None))
1009 1009
1010 1010 if bmarkchanges:
1011 1011 bmarks.applychanges(repo, tr, bmarkchanges)
1012 1012
1013 1013 for phase, nodes in toretract.items():
1014 1014 phases.retractboundary(repo, tr, phase, nodes)
1015 1015 for phase, nodes in toadvance.items():
1016 1016 phases.advanceboundary(repo, tr, phase, nodes)
1017 1017
1018 1018 # Obsolete or strip nodes
1019 1019 if obsolete.isenabled(repo, obsolete.createmarkersopt):
1020 1020 # If a node is already obsoleted, and we want to obsolete it
1021 1021 # without a successor, skip that obsolete request since it's
1022 1022 # unnecessary. That's the "if s or not isobs(n)" check below.
1023 1023 # Also sort the nodes in topological order; that might be useful for
1024 1024 # some obsstore logic.
1025 1025 # NOTE: the sorting might belong to createmarkers.
1026 1026 torev = unfi.changelog.rev
1027 1027 sortfunc = lambda ns: torev(ns[0][0])
1028 1028 rels = []
1029 1029 for ns, s in sorted(replacements.items(), key=sortfunc):
1030 1030 rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
1031 1031 rels.append(rel)
1032 1032 if rels:
1033 1033 obsolete.createmarkers(repo, rels, operation=operation,
1034 1034 metadata=metadata)
1035 1035 else:
1036 1036 from . import repair # avoid import cycle
1037 1037 tostrip = list(n for ns in replacements for n in ns)
1038 1038 if tostrip:
1039 1039 repair.delayedstrip(repo.ui, repo, tostrip, operation,
1040 1040 backup=backup)
1041 1041
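The two accepted shapes of `replacements`, as a sketch (`oldnode`/`newnode` are hypothetical binary node ids):

```python
# nodes were rewritten: record old -> new (obsmarkers or strip),
# and move bookmarks accordingly
scmutil.cleanupnodes(repo, {oldnode: [newnode]}, b'amend')

# nodes simply went away: no successors are recorded
scmutil.cleanupnodes(repo, [oldnode], b'histedit')
```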
1042 1042 def addremove(repo, matcher, prefix, uipathfn, opts=None):
1043 1043 if opts is None:
1044 1044 opts = {}
1045 1045 m = matcher
1046 1046 dry_run = opts.get('dry_run')
1047 1047 try:
1048 1048 similarity = float(opts.get('similarity') or 0)
1049 1049 except ValueError:
1050 1050 raise error.Abort(_('similarity must be a number'))
1051 1051 if similarity < 0 or similarity > 100:
1052 1052 raise error.Abort(_('similarity must be between 0 and 100'))
1053 1053 similarity /= 100.0
1054 1054
1055 1055 ret = 0
1056 1056
1057 1057 wctx = repo[None]
1058 1058 for subpath in sorted(wctx.substate):
1059 1059 submatch = matchmod.subdirmatcher(subpath, m)
1060 1060 if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
1061 1061 sub = wctx.sub(subpath)
1062 1062 subprefix = repo.wvfs.reljoin(prefix, subpath)
1063 1063 subuipathfn = subdiruipathfn(subpath, uipathfn)
1064 1064 try:
1065 1065 if sub.addremove(submatch, subprefix, subuipathfn, opts):
1066 1066 ret = 1
1067 1067 except error.LookupError:
1068 1068 repo.ui.status(_("skipping missing subrepository: %s\n")
1069 1069 % uipathfn(subpath))
1070 1070
1071 1071 rejected = []
1072 1072 def badfn(f, msg):
1073 1073 if f in m.files():
1074 1074 m.bad(f, msg)
1075 1075 rejected.append(f)
1076 1076
1077 1077 badmatch = matchmod.badmatch(m, badfn)
1078 1078 added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
1079 1079 badmatch)
1080 1080
1081 1081 unknownset = set(unknown + forgotten)
1082 1082 toprint = unknownset.copy()
1083 1083 toprint.update(deleted)
1084 1084 for abs in sorted(toprint):
1085 1085 if repo.ui.verbose or not m.exact(abs):
1086 1086 if abs in unknownset:
1087 1087 status = _('adding %s\n') % uipathfn(abs)
1088 1088 label = 'ui.addremove.added'
1089 1089 else:
1090 1090 status = _('removing %s\n') % uipathfn(abs)
1091 1091 label = 'ui.addremove.removed'
1092 1092 repo.ui.status(status, label=label)
1093 1093
1094 1094 renames = _findrenames(repo, m, added + unknown, removed + deleted,
1095 similarity)
1095 similarity, uipathfn)
1096 1096
1097 1097 if not dry_run:
1098 1098 _markchanges(repo, unknown + forgotten, deleted, renames)
1099 1099
1100 1100 for f in rejected:
1101 1101 if f in m.files():
1102 1102 return 1
1103 1103 return ret
1104 1104
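Callers now have to build `uipathfn` themselves; roughly what the addremove command does after this change (a sketch with `pats`/`opts` assumed in scope):

```python
m = scmutil.match(repo[None], pats, opts)
relative = scmutil.anypats(pats, opts)
uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
ret = scmutil.addremove(repo, m, b'', uipathfn, opts)
```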
1105 1105 def marktouched(repo, files, similarity=0.0):
1106 1106 '''Assert that files have somehow been operated upon. Files are relative to
1107 1107 the repo root.'''
1108 1108 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
1109 1109 rejected = []
1110 1110
1111 1111 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
1112 1112
1113 1113 if repo.ui.verbose:
1114 1114 unknownset = set(unknown + forgotten)
1115 1115 toprint = unknownset.copy()
1116 1116 toprint.update(deleted)
1117 1117 for abs in sorted(toprint):
1118 1118 if abs in unknownset:
1119 1119 status = _('adding %s\n') % abs
1120 1120 else:
1121 1121 status = _('removing %s\n') % abs
1122 1122 repo.ui.status(status)
1123 1123
1124 # TODO: We should probably have the caller pass in uipathfn and apply it to
1125 # the messages above too. forcerelativevalue=True is consistent with how
1126 # it used to work.
1127 uipathfn = getuipathfn(repo, forcerelativevalue=True)
1124 1128 renames = _findrenames(repo, m, added + unknown, removed + deleted,
1125 similarity)
1129 similarity, uipathfn)
1126 1130
1127 1131 _markchanges(repo, unknown + forgotten, deleted, renames)
1128 1132
1129 1133 for f in rejected:
1130 1134 if f in m.files():
1131 1135 return 1
1132 1136 return 0
1133 1137
1134 1138 def _interestingfiles(repo, matcher):
1135 1139 '''Walk dirstate with matcher, looking for files that addremove would care
1136 1140 about.
1137 1141
1138 1142 This is different from dirstate.status because it doesn't care about
1139 1143 whether files are modified or clean.'''
1140 1144 added, unknown, deleted, removed, forgotten = [], [], [], [], []
1141 1145 audit_path = pathutil.pathauditor(repo.root, cached=True)
1142 1146
1143 1147 ctx = repo[None]
1144 1148 dirstate = repo.dirstate
1145 1149 matcher = repo.narrowmatch(matcher, includeexact=True)
1146 1150 walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
1147 1151 unknown=True, ignored=False, full=False)
1148 1152 for abs, st in walkresults.iteritems():
1149 1153 dstate = dirstate[abs]
1150 1154 if dstate == '?' and audit_path.check(abs):
1151 1155 unknown.append(abs)
1152 1156 elif dstate != 'r' and not st:
1153 1157 deleted.append(abs)
1154 1158 elif dstate == 'r' and st:
1155 1159 forgotten.append(abs)
1156 1160 # for finding renames
1157 1161 elif dstate == 'r' and not st:
1158 1162 removed.append(abs)
1159 1163 elif dstate == 'a':
1160 1164 added.append(abs)
1161 1165
1162 1166 return added, unknown, deleted, removed, forgotten
1163 1167
1164 def _findrenames(repo, matcher, added, removed, similarity):
1168 def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
1165 1169 '''Find renames from removed files to added ones.'''
1166 1170 renames = {}
1167 1171 if similarity > 0:
1168 1172 for old, new, score in similar.findrenames(repo, added, removed,
1169 1173 similarity):
1170 1174 if (repo.ui.verbose or not matcher.exact(old)
1171 1175 or not matcher.exact(new)):
1172 1176 repo.ui.status(_('recording removal of %s as rename to %s '
1173 1177 '(%d%% similar)\n') %
1174 (matcher.rel(old), matcher.rel(new),
1178 (uipathfn(old), uipathfn(new),
1175 1179 score * 100))
1176 1180 renames[new] = old
1177 1181 return renames
1178 1182
1179 1183 def _markchanges(repo, unknown, deleted, renames):
1180 1184 '''Marks the files in unknown as added, the files in deleted as removed,
1181 1185 and the files in renames as copied.'''
1182 1186 wctx = repo[None]
1183 1187 with repo.wlock():
1184 1188 wctx.forget(deleted)
1185 1189 wctx.add(unknown)
1186 1190 for new, old in renames.iteritems():
1187 1191 wctx.copy(old, new)
1188 1192
1189 1193 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1190 1194 """Update the dirstate to reflect the intent of copying src to dst. For
1191 1195 different reasons it might not end with dst being marked as copied from src.
1192 1196 """
1193 1197 origsrc = repo.dirstate.copied(src) or src
1194 1198 if dst == origsrc: # copying back a copy?
1195 1199 if repo.dirstate[dst] not in 'mn' and not dryrun:
1196 1200 repo.dirstate.normallookup(dst)
1197 1201 else:
1198 1202 if repo.dirstate[origsrc] == 'a' and origsrc == src:
1199 1203 if not ui.quiet:
1200 1204 ui.warn(_("%s has not been committed yet, so no copy "
1201 1205 "data will be stored for %s.\n")
1202 1206 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
1203 1207 if repo.dirstate[dst] in '?r' and not dryrun:
1204 1208 wctx.add([dst])
1205 1209 elif not dryrun:
1206 1210 wctx.copy(origsrc, dst)
1207 1211
1208 1212 def writerequires(opener, requirements):
1209 1213 with opener('requires', 'w', atomictemp=True) as fp:
1210 1214 for r in sorted(requirements):
1211 1215 fp.write("%s\n" % r)
1212 1216
1213 1217 class filecachesubentry(object):
1214 1218 def __init__(self, path, stat):
1215 1219 self.path = path
1216 1220 self.cachestat = None
1217 1221 self._cacheable = None
1218 1222
1219 1223 if stat:
1220 1224 self.cachestat = filecachesubentry.stat(self.path)
1221 1225
1222 1226 if self.cachestat:
1223 1227 self._cacheable = self.cachestat.cacheable()
1224 1228 else:
1225 1229 # None means we don't know yet
1226 1230 self._cacheable = None
1227 1231
1228 1232 def refresh(self):
1229 1233 if self.cacheable():
1230 1234 self.cachestat = filecachesubentry.stat(self.path)
1231 1235
1232 1236 def cacheable(self):
1233 1237 if self._cacheable is not None:
1234 1238 return self._cacheable
1235 1239
1236 1240 # we don't know yet, assume it is for now
1237 1241 return True
1238 1242
1239 1243 def changed(self):
1240 1244 # no point in going further if we can't cache it
1241 1245 if not self.cacheable():
1242 1246 return True
1243 1247
1244 1248 newstat = filecachesubentry.stat(self.path)
1245 1249
1246 1250 # we may not know if it's cacheable yet, check again now
1247 1251 if newstat and self._cacheable is None:
1248 1252 self._cacheable = newstat.cacheable()
1249 1253
1250 1254 # check again
1251 1255 if not self._cacheable:
1252 1256 return True
1253 1257
1254 1258 if self.cachestat != newstat:
1255 1259 self.cachestat = newstat
1256 1260 return True
1257 1261 else:
1258 1262 return False
1259 1263
1260 1264 @staticmethod
1261 1265 def stat(path):
1262 1266 try:
1263 1267 return util.cachestat(path)
1264 1268 except OSError as e:
1265 1269 if e.errno != errno.ENOENT:
1266 1270 raise
1267 1271
1268 1272 class filecacheentry(object):
1269 1273 def __init__(self, paths, stat=True):
1270 1274 self._entries = []
1271 1275 for path in paths:
1272 1276 self._entries.append(filecachesubentry(path, stat))
1273 1277
1274 1278 def changed(self):
1275 1279 '''true if any entry has changed'''
1276 1280 for entry in self._entries:
1277 1281 if entry.changed():
1278 1282 return True
1279 1283 return False
1280 1284
1281 1285 def refresh(self):
1282 1286 for entry in self._entries:
1283 1287 entry.refresh()
1284 1288
1285 1289 class filecache(object):
1286 1290 """A property like decorator that tracks files under .hg/ for updates.
1287 1291
1288 1292 On first access, the files defined as arguments are stat()ed and the
1289 1293 results cached. The decorated function is called. The results are stashed
1290 1294 away in a ``_filecache`` dict on the object whose method is decorated.
1291 1295
1292 1296 On subsequent access, the cached result is used as it is set to the
1293 1297 instance dictionary.
1294 1298
1295 1299 On external property set/delete operations, the caller must update the
1296 1300 corresponding _filecache entry appropriately. Use __class__.<attr>.set()
1297 1301 instead of directly setting <attr>.
1298 1302
1299 1303 When using the property API, the cached data is always used if available.
1300 1304 No stat() is performed to check if the file has changed.
1301 1305
1302 1306 Others can muck about with the state of the ``_filecache`` dict. e.g. they
1303 1307 can populate an entry before the property's getter is called. In this case,
1304 1308 entries in ``_filecache`` will be used during property operations,
1305 1309 if available. If the underlying file changes, it is up to external callers
1306 1310 to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
1307 1311 method result as well as possibly calling ``del obj._filecache[attr]`` to
1308 1312 remove the ``filecacheentry``.
1309 1313 """
1310 1314
1311 1315 def __init__(self, *paths):
1312 1316 self.paths = paths
1313 1317
1314 1318 def join(self, obj, fname):
1315 1319 """Used to compute the runtime path of a cached file.
1316 1320
1317 1321 Users should subclass filecache and provide their own version of this
1318 1322 function to call the appropriate join function on 'obj' (an instance
1319 1323 of the class that its member function was decorated).
1320 1324 """
1321 1325 raise NotImplementedError
1322 1326
1323 1327 def __call__(self, func):
1324 1328 self.func = func
1325 1329 self.sname = func.__name__
1326 1330 self.name = pycompat.sysbytes(self.sname)
1327 1331 return self
1328 1332
1329 1333 def __get__(self, obj, type=None):
1330 1334 # if accessed on the class, return the descriptor itself.
1331 1335 if obj is None:
1332 1336 return self
1333 1337
1334 1338 assert self.sname not in obj.__dict__
1335 1339
1336 1340 entry = obj._filecache.get(self.name)
1337 1341
1338 1342 if entry:
1339 1343 if entry.changed():
1340 1344 entry.obj = self.func(obj)
1341 1345 else:
1342 1346 paths = [self.join(obj, path) for path in self.paths]
1343 1347
1344 1348 # We stat -before- creating the object so our cache doesn't lie if
1345 1349 # a writer modified between the time we read and stat
1346 1350 entry = filecacheentry(paths, True)
1347 1351 entry.obj = self.func(obj)
1348 1352
1349 1353 obj._filecache[self.name] = entry
1350 1354
1351 1355 obj.__dict__[self.sname] = entry.obj
1352 1356 return entry.obj
1353 1357
1354 1358 # don't implement __set__(), which would make __dict__ lookup as slow as
1355 1359 # function call.
1356 1360
1357 1361 def set(self, obj, value):
1358 1362 if self.name not in obj._filecache:
1359 1363 # we add an entry for the missing value because X in __dict__
1360 1364 # implies X in _filecache
1361 1365 paths = [self.join(obj, path) for path in self.paths]
1362 1366 ce = filecacheentry(paths, False)
1363 1367 obj._filecache[self.name] = ce
1364 1368 else:
1365 1369 ce = obj._filecache[self.name]
1366 1370
1367 1371 ce.obj = value # update cached copy
1368 1372 obj.__dict__[self.sname] = value # update copy returned by obj.x
1369 1373
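A minimal subclass sketch (the real `localrepo` defines similar machinery; `somerepo` is hypothetical):

```python
class vfsfilecache(scmutil.filecache):
    """resolve cached paths against the object's vfs"""
    def join(self, obj, fname):
        return obj.vfs.join(fname)

class somerepo(object):
    def __init__(self, vfs):
        self.vfs = vfs
        self._filecache = {}  # required by the descriptor protocol above

    @vfsfilecache(b'bookmarks')
    def bookmarks(self):
        # stat()ed on first access; recomputed per the contract above
        return self.vfs.read(b'bookmarks')
```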
1370 1374 def extdatasource(repo, source):
1371 1375 """Gather a map of rev -> value dict from the specified source
1372 1376
1373 1377 A source spec is treated as a URL, with a special case shell: type
1374 1378 for parsing the output from a shell command.
1375 1379
1376 1380 The data is parsed as a series of newline-separated records where
1377 1381 each record is a revision specifier optionally followed by a space
1378 1382 and a freeform string value. If the revision is known locally, it
1379 1383 is converted to a rev, otherwise the record is skipped.
1380 1384
1381 1385 Note that both key and value are treated as UTF-8 and converted to
1382 1386 the local encoding. This allows uniformity between local and
1383 1387 remote data sources.
1384 1388 """
1385 1389
1386 1390 spec = repo.ui.config("extdata", source)
1387 1391 if not spec:
1388 1392 raise error.Abort(_("unknown extdata source '%s'") % source)
1389 1393
1390 1394 data = {}
1391 1395 src = proc = None
1392 1396 try:
1393 1397 if spec.startswith("shell:"):
1394 1398 # external commands should be run relative to the repo root
1395 1399 cmd = spec[6:]
1396 1400 proc = subprocess.Popen(procutil.tonativestr(cmd),
1397 1401 shell=True, bufsize=-1,
1398 1402 close_fds=procutil.closefds,
1399 1403 stdout=subprocess.PIPE,
1400 1404 cwd=procutil.tonativestr(repo.root))
1401 1405 src = proc.stdout
1402 1406 else:
1403 1407 # treat as a URL or file
1404 1408 src = url.open(repo.ui, spec)
1405 1409 for l in src:
1406 1410 if " " in l:
1407 1411 k, v = l.strip().split(" ", 1)
1408 1412 else:
1409 1413 k, v = l.strip(), ""
1410 1414
1411 1415 k = encoding.tolocal(k)
1412 1416 try:
1413 1417 data[revsingle(repo, k).rev()] = encoding.tolocal(v)
1414 1418 except (error.LookupError, error.RepoLookupError):
1415 1419 pass # we ignore data for nodes that don't exist locally
1416 1420 finally:
1417 1421 if proc:
1418 1422 proc.communicate()
1419 1423 if src:
1420 1424 src.close()
1421 1425 if proc and proc.returncode != 0:
1422 1426 raise error.Abort(_("extdata command '%s' failed: %s")
1423 1427 % (cmd, procutil.explainexit(proc.returncode)))
1424 1428
1425 1429 return data
1426 1430
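For instance, with a hypothetical hgrc entry:

```python
# [extdata]
# bugzilla = shell:cat .hg/bugzilla-ids
#
# where each line of output is '<revspec> <freeform value>':
data = scmutil.extdatasource(repo, b'bugzilla')
# data maps local revision numbers to the values; records whose
# revspec is unknown locally are silently skipped
```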
1427 1431 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1428 1432 if lock is None:
1429 1433 raise error.LockInheritanceContractViolation(
1430 1434 'lock can only be inherited while held')
1431 1435 if environ is None:
1432 1436 environ = {}
1433 1437 with lock.inherit() as locker:
1434 1438 environ[envvar] = locker
1435 1439 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1436 1440
1437 1441 def wlocksub(repo, cmd, *args, **kwargs):
1438 1442 """run cmd as a subprocess that allows inheriting repo's wlock
1439 1443
1440 1444 This can only be called while the wlock is held. This takes all the
1441 1445 arguments that ui.system does, and returns the exit code of the
1442 1446 subprocess."""
1443 1447 return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
1444 1448 **kwargs)
1445 1449
1446 1450 class progress(object):
1447 1451 def __init__(self, ui, updatebar, topic, unit="", total=None):
1448 1452 self.ui = ui
1449 1453 self.pos = 0
1450 1454 self.topic = topic
1451 1455 self.unit = unit
1452 1456 self.total = total
1453 1457 self.debug = ui.configbool('progress', 'debug')
1454 1458 self._updatebar = updatebar
1455 1459
1456 1460 def __enter__(self):
1457 1461 return self
1458 1462
1459 1463 def __exit__(self, exc_type, exc_value, exc_tb):
1460 1464 self.complete()
1461 1465
1462 1466 def update(self, pos, item="", total=None):
1463 1467 assert pos is not None
1464 1468 if total:
1465 1469 self.total = total
1466 1470 self.pos = pos
1467 1471 self._updatebar(self.topic, self.pos, item, self.unit, self.total)
1468 1472 if self.debug:
1469 1473 self._printdebug(item)
1470 1474
1471 1475 def increment(self, step=1, item="", total=None):
1472 1476 self.update(self.pos + step, item, total)
1473 1477
1474 1478 def complete(self):
1475 1479 self.pos = None
1476 1480 self.unit = ""
1477 1481 self.total = None
1478 1482 self._updatebar(self.topic, self.pos, "", self.unit, self.total)
1479 1483
1480 1484 def _printdebug(self, item):
1481 1485 if self.unit:
1482 1486 unit = ' ' + self.unit
1483 1487 if item:
1484 1488 item = ' ' + item
1485 1489
1486 1490 if self.total:
1487 1491 pct = 100.0 * self.pos / self.total
1488 1492 self.ui.debug('%s:%s %d/%d%s (%4.2f%%)\n'
1489 1493 % (self.topic, item, self.pos, self.total, unit, pct))
1490 1494 else:
1491 1495 self.ui.debug('%s:%s %d%s\n' % (self.topic, item, self.pos, unit))
1492 1496
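Callers normally obtain one of these from `ui.makeprogress()`; a usage sketch:

```python
files = [b'a', b'b', b'c']
with ui.makeprogress(b'scanning', unit=b'files', total=len(files)) as p:
    for f in files:
        p.increment(item=f)
# the context manager calls complete(), clearing the progress bar
```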
1493 1497 def gdinitconfig(ui):
1494 1498 """helper function to know if a repo should be created as general delta
1495 1499 """
1496 1500 # experimental config: format.generaldelta
1497 1501 return (ui.configbool('format', 'generaldelta')
1498 1502 or ui.configbool('format', 'usegeneraldelta'))
1499 1503
1500 1504 def gddeltaconfig(ui):
1501 1505 """helper function to know if incoming delta should be optimised
1502 1506 """
1503 1507 # experimental config: format.generaldelta
1504 1508 return ui.configbool('format', 'generaldelta')
1505 1509
1506 1510 class simplekeyvaluefile(object):
1507 1511 """A simple file with key=value lines
1508 1512
1509 1513 Keys must be alphanumerics and start with a letter, values must not
1510 1514 contain '\n' characters"""
1511 1515 firstlinekey = '__firstline'
1512 1516
1513 1517 def __init__(self, vfs, path, keys=None):
1514 1518 self.vfs = vfs
1515 1519 self.path = path
1516 1520
1517 1521 def read(self, firstlinenonkeyval=False):
1518 1522 """Read the contents of a simple key-value file
1519 1523
1520 1524 'firstlinenonkeyval' indicates whether the first line of file should
1521 1525 be treated as a key-value pair or returned fully under the
1522 1526 __firstline key."""
1523 1527 lines = self.vfs.readlines(self.path)
1524 1528 d = {}
1525 1529 if firstlinenonkeyval:
1526 1530 if not lines:
1527 1531 e = _("empty simplekeyvalue file")
1528 1532 raise error.CorruptedState(e)
1529 1533 # we don't want to include '\n' in the __firstline
1530 1534 d[self.firstlinekey] = lines[0][:-1]
1531 1535 del lines[0]
1532 1536
1533 1537 try:
1534 1538 # the 'if line.strip()' part prevents us from failing on empty
1535 1539 # lines, which contain only '\n' and are therefore not skipped
1536 1540 # by a plain 'if line' check
1537 1541 updatedict = dict(line[:-1].split('=', 1) for line in lines
1538 1542 if line.strip())
1539 1543 if self.firstlinekey in updatedict:
1540 1544 e = _("%r can't be used as a key")
1541 1545 raise error.CorruptedState(e % self.firstlinekey)
1542 1546 d.update(updatedict)
1543 1547 except ValueError as e:
1544 1548 raise error.CorruptedState(str(e))
1545 1549 return d
1546 1550
1547 1551 def write(self, data, firstline=None):
1548 1552 """Write key=>value mapping to a file
1549 1553 data is a dict. Keys must be alphanumeric and start with a letter.
1550 1554 Values must not contain newline characters.
1551 1555
1552 1556 If 'firstline' is not None, it is written to the file before
1553 1557 everything else, as-is, not in key=value form"""
1554 1558 lines = []
1555 1559 if firstline is not None:
1556 1560 lines.append('%s\n' % firstline)
1557 1561
1558 1562 for k, v in data.items():
1559 1563 if k == self.firstlinekey:
1560 1564 e = "key name '%s' is reserved" % self.firstlinekey
1561 1565 raise error.ProgrammingError(e)
1562 1566 if not k[0:1].isalpha():
1563 1567 e = "keys must start with a letter in a key-value file"
1564 1568 raise error.ProgrammingError(e)
1565 1569 if not k.isalnum():
1566 1570 e = "invalid key name in a simple key-value file"
1567 1571 raise error.ProgrammingError(e)
1568 1572 if '\n' in v:
1569 1573 e = "invalid value in a simple key-value file"
1570 1574 raise error.ProgrammingError(e)
1571 1575 lines.append("%s=%s\n" % (k, v))
1572 1576 with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
1573 1577 fp.write(''.join(lines))
1574 1578
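A round-trip sketch for simplekeyvaluefile, assuming a writable directory; the `_roundtrip` helper, the 'state' file name, and its contents are hypothetical.

    from mercurial import scmutil, vfs as vfsmod

    def _roundtrip(tmpdir):
        f = scmutil.simplekeyvaluefile(vfsmod.vfs(tmpdir), 'state')
        f.write({'version': '1', 'status': 'pending'},
                firstline='format: 2')
        # the first line comes back under the reserved '__firstline' key:
        # {'__firstline': 'format: 2', 'version': '1', 'status': 'pending'}
        return f.read(firstlinenonkeyval=True)
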
1575 1579 _reportobsoletedsource = [
1576 1580 'debugobsolete',
1577 1581 'pull',
1578 1582 'push',
1579 1583 'serve',
1580 1584 'unbundle',
1581 1585 ]
1582 1586
1583 1587 _reportnewcssource = [
1584 1588 'pull',
1585 1589 'unbundle',
1586 1590 ]
1587 1591
1588 1592 def prefetchfiles(repo, revs, match):
1589 1593 """Invokes the registered file prefetch functions, allowing extensions to
1590 1594 ensure the corresponding files are available locally, before the command
1591 1595 uses them."""
1592 1596 if match:
1593 1597 # The command itself will complain about files that don't exist, so
1594 1598 # don't duplicate the message.
1595 1599 match = matchmod.badmatch(match, lambda fn, msg: None)
1596 1600 else:
1597 1601 match = matchall(repo)
1598 1602
1599 1603 fileprefetchhooks(repo, revs, match)
1600 1604
1601 1605 # a list of (repo, revs, match) prefetch functions
1602 1606 fileprefetchhooks = util.hooks()
1603 1607
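A registration sketch for extensions: `_prefetch`, 'myextension', and the loop body are hypothetical, but the (repo, revs, match) signature and util.hooks.add() are the real interface.

    from mercurial import scmutil

    def _prefetch(repo, revs, match):
        # hypothetical hook body: visit every matched file in the given
        # revisions, e.g. to schedule background fetches
        for rev in revs:
            ctx = repo[rev]
            for f in ctx.walk(match):
                pass

    scmutil.fileprefetchhooks.add('myextension', _prefetch)
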
1604 1608 # A marker that tells the evolve extension to suppress its own reporting
1605 1609 _reportstroubledchangesets = True
1606 1610
1607 1611 def registersummarycallback(repo, otr, txnname=''):
1608 1612 """register a callback to issue a summary after the transaction is closed
1609 1613 """
1610 1614 def txmatch(sources):
1611 1615 return any(txnname.startswith(source) for source in sources)
1612 1616
1613 1617 categories = []
1614 1618
1615 1619 def reportsummary(func):
1616 1620 """decorator for report callbacks."""
1617 1621 # The repoview life cycle is shorter than that of the actual
1618 1622 # underlying repository, so the filtered object can die before the
1619 1623 # weakref is used, leading to trouble. We keep a reference to the
1620 1624 # unfiltered object and restore the filtering when retrieving the
1621 1625 # repository through the weakref.
1622 1626 filtername = repo.filtername
1623 1627 reporef = weakref.ref(repo.unfiltered())
1624 1628 def wrapped(tr):
1625 1629 repo = reporef()
1626 1630 if filtername:
1627 1631 repo = repo.filtered(filtername)
1628 1632 func(repo, tr)
1629 1633 newcat = '%02i-txnreport' % len(categories)
1630 1634 otr.addpostclose(newcat, wrapped)
1631 1635 categories.append(newcat)
1632 1636 return wrapped
1633 1637
1634 1638 if txmatch(_reportobsoletedsource):
1635 1639 @reportsummary
1636 1640 def reportobsoleted(repo, tr):
1637 1641 obsoleted = obsutil.getobsoleted(repo, tr)
1638 1642 if obsoleted:
1639 1643 repo.ui.status(_('obsoleted %i changesets\n')
1640 1644 % len(obsoleted))
1641 1645
1642 1646 if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
1643 1647 repo.ui.configbool('experimental', 'evolution.report-instabilities')):
1644 1648 instabilitytypes = [
1645 1649 ('orphan', 'orphan'),
1646 1650 ('phase-divergent', 'phasedivergent'),
1647 1651 ('content-divergent', 'contentdivergent'),
1648 1652 ]
1649 1653
1650 1654 def getinstabilitycounts(repo):
1651 1655 filtered = repo.changelog.filteredrevs
1652 1656 counts = {}
1653 1657 for instability, revset in instabilitytypes:
1654 1658 counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
1655 1659 filtered)
1656 1660 return counts
1657 1661
1658 1662 oldinstabilitycounts = getinstabilitycounts(repo)
1659 1663 @reportsummary
1660 1664 def reportnewinstabilities(repo, tr):
1661 1665 newinstabilitycounts = getinstabilitycounts(repo)
1662 1666 for instability, revset in instabilitytypes:
1663 1667 delta = (newinstabilitycounts[instability] -
1664 1668 oldinstabilitycounts[instability])
1665 1669 msg = getinstabilitymessage(delta, instability)
1666 1670 if msg:
1667 1671 repo.ui.warn(msg)
1668 1672
1669 1673 if txmatch(_reportnewcssource):
1670 1674 @reportsummary
1671 1675 def reportnewcs(repo, tr):
1672 1676 """Report the range of new revisions pulled/unbundled."""
1673 1677 origrepolen = tr.changes.get('origrepolen', len(repo))
1674 1678 unfi = repo.unfiltered()
1675 1679 if origrepolen >= len(unfi):
1676 1680 return
1677 1681
1678 1682 # Compute the bounds of new visible revisions' range.
1679 1683 revs = smartset.spanset(repo, start=origrepolen)
1680 1684 if revs:
1681 1685 minrev, maxrev = repo[revs.min()], repo[revs.max()]
1682 1686
1683 1687 if minrev == maxrev:
1684 1688 revrange = minrev
1685 1689 else:
1686 1690 revrange = '%s:%s' % (minrev, maxrev)
1687 1691 draft = len(repo.revs('%ld and draft()', revs))
1688 1692 secret = len(repo.revs('%ld and secret()', revs))
1689 1693 if not (draft or secret):
1690 1694 msg = _('new changesets %s\n') % revrange
1691 1695 elif draft and secret:
1692 1696 msg = _('new changesets %s (%d drafts, %d secrets)\n')
1693 1697 msg %= (revrange, draft, secret)
1694 1698 elif draft:
1695 1699 msg = _('new changesets %s (%d drafts)\n')
1696 1700 msg %= (revrange, draft)
1697 1701 elif secret:
1698 1702 msg = _('new changesets %s (%d secrets)\n')
1699 1703 msg %= (revrange, secret)
1700 1704 else:
1701 1705 errormsg = 'entered unreachable condition'
1702 1706 raise error.ProgrammingError(errormsg)
1703 1707 repo.ui.status(msg)
1704 1708
1705 1709 # search new changesets directly pulled as obsolete
1706 1710 duplicates = tr.changes.get('revduplicates', ())
1707 1711 obsadded = unfi.revs('(%d: + %ld) and obsolete()',
1708 1712 origrepolen, duplicates)
1709 1713 cl = repo.changelog
1710 1714 extinctadded = [r for r in obsadded if r not in cl]
1711 1715 if extinctadded:
1712 1716 # They are not just obsolete, but obsolete and invisible:
1713 1717 # we call them "extinct" internally, but the term has not been
1714 1718 # exposed to users.
1715 1719 msg = '(%d other changesets obsolete on arrival)\n'
1716 1720 repo.ui.status(msg % len(extinctadded))
1717 1721
1718 1722 @reportsummary
1719 1723 def reportphasechanges(repo, tr):
1720 1724 """Report statistics of phase changes for changesets pre-existing
1721 1725 pull/unbundle.
1722 1726 """
1723 1727 origrepolen = tr.changes.get('origrepolen', len(repo))
1724 1728 phasetracking = tr.changes.get('phases', {})
1725 1729 if not phasetracking:
1726 1730 return
1727 1731 published = [
1728 1732 rev for rev, (old, new) in phasetracking.iteritems()
1729 1733 if new == phases.public and rev < origrepolen
1730 1734 ]
1731 1735 if not published:
1732 1736 return
1733 1737 repo.ui.status(_('%d local changesets published\n')
1734 1738 % len(published))
1735 1739
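A call-site sketch for registersummarycallback: localrepo's transaction machinery normally performs this wiring itself, so `_pullwithsummary` is purely illustrative.

    from mercurial import scmutil

    def _pullwithsummary(repo):
        # hypothetical: a transaction named 'pull' matches the reporting
        # sources above, so 'new changesets ...' is printed on close
        with repo.lock():
            tr = repo.transaction('pull')
            try:
                scmutil.registersummarycallback(repo, tr, txnname='pull')
                # ... apply incoming changesets here ...
                tr.close()
            finally:
                tr.release()
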
1736 1740 def getinstabilitymessage(delta, instability):
1737 1741 """function to return the message to show warning about new instabilities
1738 1742
1739 1743 exists as a separate function so that extension can wrap to show more
1740 1744 information like how to fix instabilities"""
1741 1745 if delta > 0:
1742 1746 return _('%i new %s changesets\n') % (delta, instability)
1743 1747
1744 1748 def nodesummaries(repo, nodes, maxnumnodes=4):
1745 1749 if len(nodes) <= maxnumnodes or repo.ui.verbose:
1746 1750 return ' '.join(short(h) for h in nodes)
1747 1751 first = ' '.join(short(h) for h in nodes[:maxnumnodes])
1748 1752 return _("%s and %d others") % (first, len(nodes) - maxnumnodes)
1749 1753
1750 1754 def enforcesinglehead(repo, tr, desc):
1751 1755 """check that no named branch has multiple heads"""
1752 1756 if desc in ('strip', 'repair'):
1753 1757 # skip the logic during strip
1754 1758 return
1755 1759 visible = repo.filtered('visible')
1756 1760 # possible improvement: we could restrict the check to the affected branches
1757 1761 for name, heads in visible.branchmap().iteritems():
1758 1762 if len(heads) > 1:
1759 1763 msg = _('rejecting multiple heads on branch "%s"')
1760 1764 msg %= name
1761 1765 hint = _('%d heads: %s')
1762 1766 hint %= (len(heads), nodesummaries(repo, heads))
1763 1767 raise error.Abort(msg, hint=hint)
1764 1768
1765 1769 def wrapconvertsink(sink):
1766 1770 """Allow extensions to wrap the sink returned by convcmd.convertsink()
1767 1771 before it is used, whether or not the convert extension was formally loaded.
1768 1772 """
1769 1773 return sink
1770 1774
1771 1775 def unhidehashlikerevs(repo, specs, hiddentype):
1772 1776 """parse the user specs and unhide changesets whose hash or revision number
1773 1777 is passed.
1774 1778
1775 1779 hiddentype can be: 1) 'warn': warn while unhiding changesets
1776 1780 2) 'nowarn': don't warn while unhiding changesets
1777 1781
1778 1782 returns a repo object with the required changesets unhidden
1779 1783 """
1780 1784 if not repo.filtername or not repo.ui.configbool('experimental',
1781 1785 'directaccess'):
1782 1786 return repo
1783 1787
1784 1788 if repo.filtername not in ('visible', 'visible-hidden'):
1785 1789 return repo
1786 1790
1787 1791 symbols = set()
1788 1792 for spec in specs:
1789 1793 try:
1790 1794 tree = revsetlang.parse(spec)
1791 1795 except error.ParseError: # will be reported by scmutil.revrange()
1792 1796 continue
1793 1797
1794 1798 symbols.update(revsetlang.gethashlikesymbols(tree))
1795 1799
1796 1800 if not symbols:
1797 1801 return repo
1798 1802
1799 1803 revs = _getrevsfromsymbols(repo, symbols)
1800 1804
1801 1805 if not revs:
1802 1806 return repo
1803 1807
1804 1808 if hiddentype == 'warn':
1805 1809 unfi = repo.unfiltered()
1806 1810 revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
1807 1811 repo.ui.warn(_("warning: accessing hidden changesets for write "
1808 1812 "operation: %s\n") % revstr)
1809 1813
1810 1814 # we have to use a new filtername to separate the branch/tags caches until
1811 1815 # we can disable these caches when revisions are dynamically pinned.
1812 1816 return repo.filtered('visible-hidden', revs)
1813 1817
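A usage sketch, assuming a caller that resolves user-supplied revision specs; `_resolveuserspecs` is hypothetical, while revrange() is defined earlier in this module.

    from mercurial import scmutil

    def _resolveuserspecs(repo, specs):
        # hypothetical helper: widen the repo view first so that hash-like
        # arguments naming hidden changesets still resolve
        repo = scmutil.unhidehashlikerevs(repo, specs, hiddentype='warn')
        return scmutil.revrange(repo, specs)
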
1814 1818 def _getrevsfromsymbols(repo, symbols):
1815 1819 """parse the list of symbols and returns a set of revision numbers of hidden
1816 1820 changesets present in symbols"""
1817 1821 revs = set()
1818 1822 unfi = repo.unfiltered()
1819 1823 unficl = unfi.changelog
1820 1824 cl = repo.changelog
1821 1825 tiprev = len(unficl)
1822 1826 allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
1823 1827 for s in symbols:
1824 1828 try:
1825 1829 n = int(s)
1826 1830 if n <= tiprev:
1827 1831 if not allowrevnums:
1828 1832 continue
1829 1833 else:
1830 1834 if n not in cl:
1831 1835 revs.add(n)
1832 1836 continue
1833 1837 except ValueError:
1834 1838 pass
1835 1839
1836 1840 try:
1837 1841 s = resolvehexnodeidprefix(unfi, s)
1838 1842 except (error.LookupError, error.WdirUnsupported):
1839 1843 s = None
1840 1844
1841 1845 if s is not None:
1842 1846 rev = unficl.rev(s)
1843 1847 if rev not in cl:
1844 1848 revs.add(rev)
1845 1849
1846 1850 return revs
1847 1851
1848 1852 def bookmarkrevs(repo, mark):
1849 1853 """
1850 1854 Select revisions reachable by a given bookmark
1851 1855 """
1852 1856 return repo.revs("ancestors(bookmark(%s)) - "
1853 1857 "ancestors(head() and not bookmark(%s)) - "
1854 1858 "ancestors(bookmark() and not bookmark(%s))",
1855 1859 mark, mark, mark)
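Reading the revset: it keeps history reachable from the given bookmark while subtracting what is also reachable from non-bookmarked heads or from other bookmarks, leaving only that bookmark's own line of development. A usage sketch, where `_printbookmarkwork` and the 'feature' bookmark are hypothetical:

    from mercurial import scmutil
    from mercurial.node import short

    def _printbookmarkwork(ui, repo):
        # print each changeset that belongs to the bookmark's own work
        for rev in scmutil.bookmarkrevs(repo, 'feature'):
            ui.write('%d:%s\n' % (rev, short(repo[rev].node())))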