##// END OF EJS Templates
extdata: avoid crashing inside subprocess when we get a revset parse error...
Augie Fackler -
r42776:ea6558db default
parent child Browse files
Show More
@@ -1,1981 +1,1986 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import hashlib
13 13 import os
14 14 import posixpath
15 15 import re
16 16 import subprocess
17 17 import weakref
18 18
19 19 from .i18n import _
20 20 from .node import (
21 21 bin,
22 22 hex,
23 23 nullid,
24 24 nullrev,
25 25 short,
26 26 wdirid,
27 27 wdirrev,
28 28 )
29 29
30 30 from . import (
31 31 copies as copiesmod,
32 32 encoding,
33 33 error,
34 34 match as matchmod,
35 35 obsolete,
36 36 obsutil,
37 37 pathutil,
38 38 phases,
39 39 policy,
40 40 pycompat,
41 41 revsetlang,
42 42 similar,
43 43 smartset,
44 44 url,
45 45 util,
46 46 vfs,
47 47 )
48 48
49 49 from .utils import (
50 50 procutil,
51 51 stringutil,
52 52 )
53 53
if pycompat.iswindows:
    from . import scmwindows as scmplatform
else:
    from . import scmposix as scmplatform

# choose the C or pure-Python parsers implementation per the module policy
parsers = policy.importmod(r'parsers')

# platform-specific terminal size query (from scmwindows/scmposix above)
termsize = scmplatform.termsize
62 62
class status(tuple):
    '''Named tuple with a list of files per status. The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    '''

    # tuple layout: (modified, added, removed, deleted, unknown, ignored,
    # clean); __slots__ keeps instances as small as a plain tuple
    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
                                   ignored, clean))

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        # pprint each member list and convert to native str for %-formatting
        return ((r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
                 r'unknown=%s, ignored=%s, clean=%s>') %
                tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self))
116 116
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = set()

    # subpaths only present in ctx2 are handled separately below
    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
141 141
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    # collect excluded nodes that are secret and still alive; those are the
    # ones worth mentioning in the "no changes" message
    secretlist = []
    for node in (excluded or ()):
        ctx = repo[node]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secretlist.append(node)

    if secretlist:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
    else:
        ui.status(_("no changes found\n"))
158 158
def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.
    """
    try:
        try:
            return func()
        except: # re-raises
            # print the traceback (if enabled) before dispatching below
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        if inst.errno == errno.ETIMEDOUT:
            reason = _('timed out waiting for lock held by %r') % (
                pycompat.bytestr(inst.locker))
        else:
            reason = _('lock held by %r') % inst.locker
        ui.error(_("abort: %s: %s\n") % (
            inst.desc or stringutil.forcebytestr(inst.filename), reason))
        if not inst.locker:
            ui.error(_("(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        ui.error(_("abort: could not lock %s: %s\n") %
                 (inst.desc or stringutil.forcebytestr(inst.filename),
                  encoding.strtolocal(inst.strerror)))
    except error.OutOfBandError as inst:
        if inst.args:
            msg = _("abort: remote error:\n")
        else:
            msg = _("abort: remote error\n")
        ui.error(msg)
        if inst.args:
            ui.error(''.join(inst.args))
        if inst.hint:
            ui.error('(%s)\n' % inst.hint)
    except error.RepoError as inst:
        ui.error(_("abort: %s!\n") % inst)
        if inst.hint:
            ui.error(_("(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.error(_("abort: %s") % inst.args[0])
        msg = inst.args[1]
        # the payload may be unicode, bytes, or anything else; render safely
        if isinstance(msg, type(u'')):
            msg = pycompat.sysbytes(msg)
        if not isinstance(msg, bytes):
            ui.error(" %r\n" % (msg,))
        elif not msg:
            ui.error(_(" empty string\n"))
        else:
            ui.error("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
    except error.CensoredNodeError as inst:
        ui.error(_("abort: file censored %s!\n") % inst)
    except error.StorageError as inst:
        ui.error(_("abort: %s!\n") % inst)
        if inst.hint:
            ui.error(_("(%s)\n") % inst.hint)
    except error.InterventionRequired as inst:
        ui.error("%s\n" % inst)
        if inst.hint:
            ui.error(_("(%s)\n") % inst.hint)
        # intervention required is not a hard failure: exit code 1
        return 1
    except error.WdirUnsupported:
        ui.error(_("abort: working directory revision cannot be specified\n"))
    except error.Abort as inst:
        ui.error(_("abort: %s\n") % inst)
        if inst.hint:
            ui.error(_("(%s)\n") % inst.hint)
    except ImportError as inst:
        ui.error(_("abort: %s!\n") % stringutil.forcebytestr(inst))
        m = stringutil.forcebytestr(inst).split()[-1]
        if m in "mpatch bdiff".split():
            ui.error(_("(did you forget to compile extensions?)\n"))
        elif m in "zlib".split():
            ui.error(_("(is your Python install correct?)\n"))
    except (IOError, OSError) as inst:
        if util.safehasattr(inst, "code"): # HTTPError
            ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst))
        elif util.safehasattr(inst, "reason"): # URLError or SSLError
            try: # usually it is in the form (errno, strerror)
                reason = inst.reason.args[1]
            except (AttributeError, IndexError):
                # it might be anything, for example a string
                reason = inst.reason
            if isinstance(reason, pycompat.unicode):
                # SSLError of Python 2.7.9 contains a unicode
                reason = encoding.unitolocal(reason)
            ui.error(_("abort: error: %s\n") % reason)
        elif (util.safehasattr(inst, "args")
              and inst.args and inst.args[0] == errno.EPIPE):
            # broken pipe is expected (e.g. output piped to head); stay quiet
            pass
        elif getattr(inst, "strerror", None): # common IOError or OSError
            if getattr(inst, "filename", None) is not None:
                ui.error(_("abort: %s: '%s'\n") % (
                    encoding.strtolocal(inst.strerror),
                    stringutil.forcebytestr(inst.filename)))
            else:
                ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
        else: # suspicious IOError
            raise
    except MemoryError:
        ui.error(_("abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case catch this and and pass exit code to caller.
        return inst.code

    # any handled exception that did not return above means failure
    return -1
269 269
def checknewlabel(repo, lbl, kind):
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ['tip', '.', 'null']:
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    for forbidden in (':', '\0', '\n', '\r'):
        if forbidden in lbl:
            raise error.Abort(
                _("%r cannot be used in a name") % pycompat.bytestr(forbidden))
    try:
        int(lbl)
    except ValueError:
        pass
    else:
        # the name parsed as an integer, which would shadow revnums
        raise error.Abort(_("cannot use an integer as a name"))
    if lbl != lbl.strip():
        raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
286 286
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if any(banned in f for banned in ('\r', '\n')):
        raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
                          % pycompat.bytestr(f))
292 292
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if abort or warn:
        msg = util.checkwinfilename(f)
        if msg:
            msg = "%s: %s" % (msg, procutil.shellquote(f))
            if abort:
                raise error.Abort(msg)
            # warn-only mode: report the problem but keep going
            ui.warn(_("warning: %s\n") % msg)
304 304
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames')
    lval = val.lower()
    bval = stringutil.parsebool(val)
    # on Windows non-portable names always abort, regardless of config
    abort = pycompat.iswindows or lval == 'abort'
    warn = bval or lval == 'warn'
    if bval is None and not (warn or abort or lval == 'ignore'):
        # the value parsed as neither a boolean nor a known keyword
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
317 317
class casecollisionauditor(object):
    # Detects new filenames that differ only by case from tracked files,
    # which collide on case-insensitive filesystems.
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        # join with '\0' so all names are lowercased in one encoding.lower()
        # call, then split back into a set
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)
341 341
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        # nothing filtered: no key to compute
        return None
    key = None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if revs:
        s = hashlib.sha1()
        for rev in revs:
            # feed revisions as bytes: hashlib rejects unicode str on
            # Python 3, and b'%d;' is byte-identical on Python 2
            s.update(b'%d;' % rev)
        key = s.digest()
    return key
365 365
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # only errors on the root path itself are fatal
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # remember dirname's stat; return True only the first time we
            # see this directory (guards against symlink cycles)
            dirstat = os.stat(dirname)
            match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        # without os.path.samestat we cannot detect cycles; disable following
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # walk the link target ourselves, sharing seen_dirs
                        # so the same directory is never visited twice
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
409 409
def binnode(ctx):
    """Return binary node id for a given basectx"""
    node = ctx.node()
    # a workingctx has no node; map it to the sentinel wdirid
    return wdirid if node is None else node
416 416
def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    rev = ctx.rev()
    # a workingctx has no rev; map it to the sentinel wdirrev
    return wdirrev if rev is None else rev
424 424
def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    # intrev()/binnode() map the working directory to wdirrev/wdirid, so
    # this also works for a workingctx
    repo = ctx.repo()
    return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
430 430
def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    # full hash in debug mode, abbreviated hash otherwise
    hexfunc = hex if ui.debugflag else short
    return '%d:%s' % (rev, hexfunc(node))
438 438
def resolvehexnodeidprefix(repo, prefix):
    # an 'x' prefix escapes a hex node id that could otherwise be mistaken
    # for a revision number (see shortesthexnodeidprefix/disambiguate)
    if (prefix.startswith('x') and
        repo.ui.configbool('experimental', 'revisions.prefixhexnode')):
        prefix = prefix[1:]
    try:
        # Uses unfiltered repo because it's faster when prefix is ambiguous.
        # This matches the shortesthexnodeidprefix() function below.
        node = repo.unfiltered().changelog._partialmatch(prefix)
    except error.AmbiguousPrefixLookupError:
        revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
        if revset:
            # Clear config to avoid infinite recursion
            configoverrides = {('experimental',
                                'revisions.disambiguatewithin'): None}
            with repo.ui.configoverride(configoverrides):
                revs = repo.anyrevs([revset], user=True)
                matches = []
                for rev in revs:
                    node = repo.changelog.node(rev)
                    if hex(node).startswith(prefix):
                        matches.append(node)
                # only a unique match within the configured subset resolves
                # the ambiguity; otherwise re-raise
                if len(matches) == 1:
                    return matches[0]
        raise
    if node is None:
        return
    repo.changelog.rev(node) # make sure node isn't filtered
    return node
467 467
def mayberevnum(repo, prefix):
    """Checks if the given prefix may be mistaken for a revision number"""
    try:
        i = int(prefix)
    except ValueError:
        # not numeric at all, cannot be a revnum
        return False
    # a leading zero (other than the single revnum '0') is never a revnum,
    # and neither is anything at or beyond the tip revision
    if prefix != b'0' and prefix[0:1] == b'0':
        return False
    return i < len(repo)
481 481
def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
    """Find the shortest unambiguous prefix that matches hexnode.

    If "cache" is not None, it must be a dictionary that can be used for
    caching between calls to this method.
    """
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. so we look for hash collision in
    # unfiltered space, which means some hashes may be slightly longer.

    minlength=max(minlength, 1)

    def disambiguate(prefix):
        """Disambiguate against revnums."""
        if repo.ui.configbool('experimental', 'revisions.prefixhexnode'):
            # escape revnum-looking prefixes with 'x' instead of lengthening
            if mayberevnum(repo, prefix):
                return 'x' + prefix
            else:
                return prefix

        # otherwise lengthen the prefix until it cannot be read as a revnum
        hexnode = hex(node)
        for length in range(len(prefix), len(hexnode) + 1):
            prefix = hexnode[:length]
            if not mayberevnum(repo, prefix):
                return prefix

    cl = repo.unfiltered().changelog
    revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
    if revset:
        revs = None
        if cache is not None:
            revs = cache.get('disambiguationrevset')
        if revs is None:
            revs = repo.anyrevs([revset], user=True)
            if cache is not None:
                cache['disambiguationrevset'] = revs
        if cl.rev(node) in revs:
            hexnode = hex(node)
            nodetree = None
            if cache is not None:
                nodetree = cache.get('disambiguationnodetree')
            if not nodetree:
                try:
                    nodetree = parsers.nodetree(cl.index, len(revs))
                except AttributeError:
                    # no native nodetree
                    pass
                else:
                    for r in revs:
                        nodetree.insert(r)
                    if cache is not None:
                        cache['disambiguationnodetree'] = nodetree
            if nodetree is not None:
                # fast path: the native nodetree knows the shortest length
                length = max(nodetree.shortest(node), minlength)
                prefix = hexnode[:length]
                return disambiguate(prefix)
            # slow path: grow the prefix until it matches a single node
            # within the configured revset
            for length in range(minlength, len(hexnode) + 1):
                matches = []
                prefix = hexnode[:length]
                for rev in revs:
                    otherhexnode = repo[rev].hex()
                    if prefix == otherhexnode[:length]:
                        matches.append(otherhexnode)
                if len(matches) == 1:
                    return disambiguate(prefix)

    try:
        return disambiguate(cl.shortest(node, minlength))
    except error.LookupError:
        raise error.RepoLookupError()
552 552
def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
    symbol is an ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
    except error.RepoLookupError:
        return False
    else:
        return True
564 564
def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        msg = ("symbol (%s of type %s) was not a string, did you mean "
               "repo[symbol]?" % (symbol, type(symbol)))
        raise error.ProgrammingError(msg)
    try:
        if symbol in ('.', 'tip', 'null'):
            return repo[symbol]

        # 1) try the symbol as a revision number
        try:
            r = int(symbol)
            # reject forms like '01' that do not round-trip through %d
            if '%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        # 2) try the symbol as a full 40-digit hex node id
        if len(symbol) == 40:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        # 4) finally, try the symbol as a hex node id prefix
        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_("unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        return repo[None]
    except (error.FilteredIndexError, error.FilteredLookupError,
            error.FilteredRepoLookupError):
        # translate filtered-revision errors into a friendlier message
        raise _filterederror(repo, symbol)
625 625
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith('visible'):

        # Check if the changeset is obsolete
        unfilteredrepo = repo.unfiltered()
        ctx = revsymbol(unfilteredrepo, changeid)

        # If the changeset is obsolete, enrich the message with the reason
        # that made this changeset not visible
        if ctx.obsolete():
            msg = obsutil._getfilteredreason(repo, changeid, ctx)
        else:
            msg = _("hidden revision '%s'") % changeid

        hint = _('use --hidden to access hidden revisions')

        return error.FilteredRepoLookupError(msg, hint=hint)
    # non-'visible' filters get a generic message naming the subset
    msg = _("filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)
650 650
def revsingle(repo, revspec, default='.', localalias=None):
    # an explicit revision 0 is a valid spec even though it is falsy
    if not revspec and revspec != 0:
        return repo[default]

    l = revrange(repo, [revspec], localalias=localalias)
    if not l:
        raise error.Abort(_('empty revision set'))
    # when the spec yields several revisions, the last one wins
    return repo[l.last()]
659 659
def _pairspec(revspec):
    '''Report whether revspec parses to a top-level range expression.'''
    tree = revsetlang.parse(revspec)
    if not tree:
        # preserve the falsy parse result exactly as the caller expects
        return tree
    return tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
663 663
def revpair(repo, revs):
    """Return a pair of contexts for the given revision specs.

    With no specs, returns (wc parent, working directory context).
    """
    if not revs:
        return repo['.'], repo[None]

    l = revrange(repo, revs)

    if not l:
        raise error.Abort(_('empty revision range'))

    first = l.first()
    second = l.last()

    # first == second with several specs can mean one side was empty
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]
685 685
def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    # normalize bare revision numbers into revset syntax
    allspecs = [revsetlang.formatspec('%d', spec)
                if isinstance(spec, int) else spec
                for spec in specs]
    return repo.anyrevs(allspecs, user=True, localalias=localalias)
713 713
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        # debug mode: always show two parents, padding with the null rev
        return [parents[0], repo[nullrev]]
    if parents[0].rev() >= intrev(ctx) - 1:
        # the sole parent immediately precedes this revision; omit it
        return []
    return parents
729 729
def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
    """Return a function that produces paths for presenting to the user.

    The returned function takes a repo-relative path and produces a path
    that can be presented in the UI.

    Depending on the value of ui.relative-paths, either a repo-relative or
    cwd-relative path will be produced.

    legacyrelativevalue is the value to use if ui.relative-paths=legacy

    If forcerelativevalue is not None, then that value will be used regardless
    of what ui.relative-paths is set to.
    """
    if forcerelativevalue is not None:
        relative = forcerelativevalue
    else:
        config = repo.ui.config('ui', 'relative-paths')
        if config == 'legacy':
            relative = legacyrelativevalue
        else:
            relative = stringutil.parsebool(config)
            if relative is None:
                raise error.ConfigError(
                    _("ui.relative-paths is not a boolean ('%s')") % config)

    if relative:
        # bind cwd and pathto once so the returned function is cheap to call
        cwd = repo.getcwd()
        pathto = repo.pathto
        return lambda f: pathto(f, cwd)
    elif repo.ui.configbool('ui', 'slash'):
        # repo-relative with forward slashes: pass paths through unchanged
        return lambda f: f
    else:
        return util.localpath
764 764
def subdiruipathfn(subpath, uipathfn):
    '''Create a new uipathfn that treats the file as relative to subpath.'''
    def relpathfn(f):
        # prefix with the subrepo path before delegating to the parent fn
        return uipathfn(posixpath.join(subpath, f))
    return relpathfn
768 768
def anypats(pats, opts):
    '''Checks if any patterns, including --include and --exclude were given.

    Some commands (e.g. addremove) use this condition for deciding whether to
    print absolute or relative paths.
    '''
    if pats:
        return True
    return bool(opts.get('include') or opts.get('exclude'))
776 776
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        # only expand patterns without an explicit kind: prefix
        if kind is None:
            try:
                globbed = glob.glob(pat)
            except re.error:
                # invalid glob pattern; fall back to the literal name
                globbed = [pat]
            if globbed:
                ret.extend(globbed)
                continue
        ret.append(kindpat)
    return ret
795 795
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)
    def bad(f, msg):
        # default badfn: warn with a user-presentable path
        ctx.repo().ui.warn("%s: %s\n" % (uipathfn(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        # an always-matcher means no effective patterns remain
        pats = []
    return m, pats
819 819
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    # thin wrapper around matchandpats() that drops the normalized patterns
    return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
824 824
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    # 'repo' is unused but kept for interface symmetry with match()
    return matchmod.always()
828 828
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    # 'repo' is unused but kept for interface symmetry with match()
    return matchmod.exact(files, badfn=badfn)
832 832
def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.
    """
    if not matchmod.patkind(pat):
        # plain path: just normalize it against the repo root
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    else:
        ctx = repo[rev]
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
        files = [f for f in ctx if m(f)]
        if len(files) != 1:
            # followlines needs exactly one file; raise with the caller's msg
            raise error.ParseError(msg)
        return files[0]
846 846
def getorigvfs(ui, repo):
    """return a vfs suitable to save 'orig' file

    return None if no special directory is configured"""
    origbackuppath = ui.config('ui', 'origbackuppath')
    if not origbackuppath:
        return None
    # the configured path is joined onto the working directory vfs
    return vfs.vfs(repo.wvfs.join(origbackuppath))
855 855
def backuppath(ui, repo, filepath):
    '''customize where working copy backup files (.orig files) are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified

    filepath is repo-relative

    Returns an absolute path
    '''
    origvfs = getorigvfs(ui, repo)
    if origvfs is None:
        return repo.wjoin(filepath + ".orig")

    origbackupdir = origvfs.dirname(filepath)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        for f in reversed(list(util.finddirs(filepath))):
            if origvfs.isfileorlink(f):
                ui.note(_('removing conflicting file: %s\n')
                        % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    if origvfs.isdir(filepath) and not origvfs.islink(filepath):
        # a directory in the way of the backup file must go too
        ui.note(_('removing conflicting directory: %s\n')
                % origvfs.join(filepath))
        origvfs.rmtree(filepath, forcibly=True)

    return origvfs.join(filepath)
890 890
891 891 class _containsnode(object):
892 892 """proxy __contains__(node) to container.__contains__ which accepts revs"""
893 893
894 894 def __init__(self, repo, revcontainer):
895 895 self._torev = repo.changelog.rev
896 896 self._revcontains = revcontainer.__contains__
897 897
898 898 def __contains__(self, node):
899 899 return self._revcontains(self._torev(node))
900 900
def cleanupnodes(repo, replacements, operation, moves=None, metadata=None,
                 fixphase=False, targetphase=None, backup=True):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or a iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is dictionary containing metadata to be stored in obsmarker if
    obsolescence is enabled.
    """
    # targetphase only makes sense when we are allowed to fix phases
    assert fixphase or targetphase is None
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, 'items'):
        # a bare iterable of nodes: each node is removed with no successor
        replacements = {(n,): () for n in replacements}
    else:
        # upgrading non tuple "source" to tuple ones for BC
        repls = {}
        for key, value in replacements.items():
            if not isinstance(key, tuple):
                key = (key,)
            repls[key] = value
        replacements = repls

    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    # explicit entries in 'moves' take precedence over computed ones
    for oldnodes, newnodes in replacements.items():
        for oldnode in oldnodes:
            if oldnode in moves:
                continue
            if len(newnodes) > 1:
                # usually a split, take the one with biggest rev number
                newnode = next(unfi.set('max(%ln)', newnodes)).node()
            elif len(newnodes) == 0:
                # move bookmark backwards
                allreplaced = []
                for rep in replacements:
                    allreplaced.extend(rep)
                roots = list(unfi.set('max((::%n) - %ln)', oldnode,
                                      allreplaced))
                if roots:
                    newnode = roots[0].node()
                else:
                    newnode = nullid
            else:
                newnode = newnodes[0]
            moves[oldnode] = newnode

    allnewnodes = [n for ns in replacements.values() for n in ns]
    toretract = {}
    toadvance = {}
    if fixphase:
        # compute the phase each new node should end up in, walking new
        # nodes in revision order so parents are processed first
        precursors = {}
        for oldnodes, newnodes in replacements.items():
            for oldnode in oldnodes:
                for newnode in newnodes:
                    precursors.setdefault(newnode, []).append(oldnode)

        allnewnodes.sort(key=lambda n: unfi[n].rev())
        newphases = {}
        def phase(ctx):
            return newphases.get(ctx.node(), ctx.phase())
        for newnode in allnewnodes:
            ctx = unfi[newnode]
            # a node can never be more public than its parents
            parentphase = max(phase(p) for p in ctx.parents())
            if targetphase is None:
                oldphase = max(unfi[oldnode].phase()
                               for oldnode in precursors[newnode])
                newphase = max(oldphase, parentphase)
            else:
                newphase = max(targetphase, parentphase)
            newphases[newnode] = newphase
            if newphase > ctx.phase():
                toretract.setdefault(newphase, []).append(newnode)
            elif newphase < ctx.phase():
                toadvance.setdefault(newphase, []).append(newnode)

    with repo.transaction('cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks # avoid import cycle
            repo.ui.debug('moving bookmarks %r from %s to %s\n' %
                          (pycompat.rapply(pycompat.maybebytestr, oldbmarks),
                           hex(oldnode), hex(newnode)))
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
                                   allnewnodes, newnode, oldnode)
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        for phase, nodes in toretract.items():
            phases.retractboundary(repo, tr, phase, nodes)
        for phase, nodes in toadvance.items():
            phases.advanceboundary(repo, tr, phase, nodes)

        mayusearchived = repo.ui.config('experimental', 'cleanup-as-archived')
        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obssolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the node in topology order, that might be useful for
            # some obsstore logic.
            # NOTE: the sorting might belong to createmarkers.
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0][0])
            rels = []
            for ns, s in sorted(replacements.items(), key=sortfunc):
                rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
                rels.append(rel)
            if rels:
                obsolete.createmarkers(repo, rels, operation=operation,
                                       metadata=metadata)
        elif phases.supportinternal(repo) and mayusearchived:
            # this assume we do not have "unstable" nodes above the cleaned ones
            allreplaced = set()
            for ns in replacements.keys():
                allreplaced.update(ns)
            if backup:
                from . import repair # avoid import cycle
                node = min(allreplaced, key=repo.changelog.rev)
                repair.backupbundle(repo, allreplaced, allreplaced, node,
                                    operation)
            phases.retractboundary(repo, tr, phases.archived, allreplaced)
        else:
            # no obsolescence support: fall back to stripping the old nodes
            from . import repair # avoid import cycle
            tostrip = list(n for ns in replacements for n in ns)
            if tostrip:
                repair.delayedstrip(repo.ui, repo, tostrip, operation,
                                    backup=backup)
1055 1055
def addremove(repo, matcher, prefix, uipathfn, opts=None):
    '''add new files and remove missing files matched by matcher, recursing
    into subrepos when requested; returns 1 if anything failed, else 0'''
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get('dry_run')
    try:
        similarity = float(opts.get('similarity') or 0)
    except ValueError:
        raise error.Abort(_('similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.Abort(_('similarity must be between 0 and 100'))
    # rename detection works on a 0.0-1.0 scale internally
    similarity /= 100.0

    ret = 0

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = subdiruipathfn(subpath, uipathfn)
            try:
                if sub.addremove(submatch, subprefix, subuipathfn, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % uipathfn(subpath))

    # collect files the matcher complained about, but only report them
    # via m.bad() when they were named explicitly
    rejected = []
    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                   badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % uipathfn(abs)
                label = 'ui.addremove.added'
            else:
                status = _('removing %s\n') % uipathfn(abs)
                label = 'ui.addremove.removed'
            repo.ui.status(status, label=label)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity, uipathfn)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    # an explicitly named file that was rejected is a hard failure
    for f in rejected:
        if f in m.files():
            return 1
    return ret
1118 1118
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    # NOTE: the badfn lambda closes over 'rejected', which is assigned on
    # the next line; this is safe because the lambda only runs later, when
    # the matcher is walked.
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    # TODO: We should probably have the caller pass in uipathfn and apply it to
    # the messages above too. legacyrelativevalue=True is consistent with how
    # it used to work.
    uipathfn = getuipathfn(repo, legacyrelativevalue=True)
    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity, uipathfn)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    # an explicitly requested file that was rejected is a hard failure
    for f in rejected:
        if f in m.files():
            return 1
    return 0
1151 1151
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns (added, unknown, deleted, removed, forgotten) lists of
    repo-relative paths.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    matcher = repo.narrowmatch(matcher, includeexact=True)
    walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
                                unknown=True, ignored=False, full=False)
    # walkresults maps path -> stat result (falsy when the file is gone);
    # dirstate[abs] is the single-letter dirstate code ('?', 'r', 'a', ...)
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            # untracked on-disk file within a valid path
            unknown.append(abs)
        elif dstate != 'r' and not st:
            # tracked but missing from disk
            deleted.append(abs)
        elif dstate == 'r' and st:
            # marked removed but present on disk
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
1181 1181
1182 1182 def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
1183 1183 '''Find renames from removed files to added ones.'''
1184 1184 renames = {}
1185 1185 if similarity > 0:
1186 1186 for old, new, score in similar.findrenames(repo, added, removed,
1187 1187 similarity):
1188 1188 if (repo.ui.verbose or not matcher.exact(old)
1189 1189 or not matcher.exact(new)):
1190 1190 repo.ui.status(_('recording removal of %s as rename to %s '
1191 1191 '(%d%% similar)\n') %
1192 1192 (uipathfn(old), uipathfn(new),
1193 1193 score * 100))
1194 1194 renames[new] = old
1195 1195 return renames
1196 1196
1197 1197 def _markchanges(repo, unknown, deleted, renames):
1198 1198 '''Marks the files in unknown as added, the files in deleted as removed,
1199 1199 and the files in renames as copied.'''
1200 1200 wctx = repo[None]
1201 1201 with repo.wlock():
1202 1202 wctx.forget(deleted)
1203 1203 wctx.add(unknown)
1204 1204 for new, old in renames.iteritems():
1205 1205 wctx.copy(old, new)
1206 1206
def getrenamedfn(repo, endrev=None):
    '''return a function looking up the copy source of a file at a given rev

    The returned callable takes (fn, rev) and returns the source path of a
    rename/copy, or None. 'endrev' bounds how far the filelog-based cache
    is prefilled (defaults to the full repo length).'''
    if copiesmod.usechangesetcentricalgo(repo):
        # copy metadata is stored in the changeset itself: just look it up
        # in the context's parent-copy maps
        def getrenamed(fn, rev):
            ctx = repo[rev]
            p1copies = ctx.p1copies()
            if fn in p1copies:
                return p1copies[fn]
            p2copies = ctx.p2copies()
            if fn in p2copies:
                return p2copies[fn]
            return None
        return getrenamed

    # filelog-based algorithm: cache rename info per file, keyed by linkrev
    rcache = {}
    if endrev is None:
        endrev = len(repo)

    def getrenamed(fn, rev):
        '''looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev.'''
        if fn not in rcache:
            # first request for this file: scan its whole filelog once
            rcache[fn] = {}
            fl = repo.file(fn)
            for i in fl:
                lr = fl.linkrev(i)
                renamed = fl.renamed(fl.node(i))
                rcache[fn][lr] = renamed and renamed[0]
                if lr >= endrev:
                    break
        if rev in rcache[fn]:
            return rcache[fn][rev]

        # If linkrev != rev (i.e. rev not found in rcache) fallback to
        # filectx logic.
        try:
            return repo[rev][fn].copysource()
        except error.LookupError:
            return None

    return getrenamed
1249 1249
def getcopiesfn(repo, endrev=None):
    """return a function mapping a changectx to its sorted (dst, src) copies"""
    if copiesmod.usechangesetcentricalgo(repo):
        def copiesfn(ctx):
            # Changeset-centric storage: merge the copies recorded against
            # both parents. There should be no overlap between the two.
            if not ctx.p2copies():
                return sorted(ctx.p1copies().items())
            merged = ctx.p1copies().copy()
            merged.update(ctx.p2copies())
            return sorted(merged.items())
    else:
        getrenamed = getrenamedfn(repo, endrev)
        def copiesfn(ctx):
            # Filelog-based storage: probe each touched file for a rename.
            found = []
            for fn in ctx.files():
                src = getrenamed(fn, ctx.rev())
                if src:
                    found.append((fn, src))
            return found

    return copiesfn
1271 1271
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    # follow a copy chain: if src is itself a copy, credit the original
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        # 'm'/'n' are dirstate codes for merged/normal; anything else
        # needs a fresh lookup entry
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # the source is only added, not committed: copy data would be
            # lost on commit anyway, so just add the destination
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
1290 1290
def movedirstate(repo, newctx, match=None):
    """Move the dirstate to newctx and adjust it as necessary.

    A matcher can be provided as an optimization. It is probably a bug to pass
    a matcher that doesn't match all the differences between the parent of the
    working copy and newctx.
    """
    oldctx = repo['.']
    ds = repo.dirstate
    ds.setparents(newctx.node(), nullid)
    # snapshot the working-dir copies before the status loops mutate ds
    copies = dict(ds.copies())
    s = newctx.status(oldctx, match=match)
    # In the loops below, the single-letter codes are dirstate states:
    # 'r' removed, 'a' added.
    for f in s.modified:
        if ds[f] == 'r':
            # modified + removed -> removed
            continue
        ds.normallookup(f)

    for f in s.added:
        if ds[f] == 'r':
            # added + removed -> unknown
            ds.drop(f)
        elif ds[f] != 'a':
            ds.add(f)

    for f in s.removed:
        if ds[f] == 'a':
            # removed + added -> normal
            ds.normallookup(f)
        elif ds[f] != 'r':
            ds.remove(f)

    # Merge old parent and old working dir copies
    oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
    oldcopies.update(copies)
    # chain copy sources through the old copies where possible
    copies = dict((dst, oldcopies.get(src, src))
                  for dst, src in oldcopies.iteritems())
    # Adjust the dirstate copies
    for dst, src in copies.iteritems():
        # only files added in newctx terms can carry copy information
        if (src not in newctx or dst in newctx or ds[dst] != 'a'):
            src = None
        ds.copy(src, dst)
1333 1333
def writerequires(opener, requirements):
    """write the sorted requirements, one per line, to the 'requires' file"""
    with opener('requires', 'w', atomictemp=True) as fp:
        for requirement in sorted(requirements):
            fp.write("%s\n" % requirement)
1338 1338
class filecachesubentry(object):
    """Tracks a single on-disk path via its stat data for cache validation."""

    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)
            # A missing file leaves _cacheable as None, meaning "unknown".
            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()

    def refresh(self):
        # Only re-stat paths whose stat data is usable for caching.
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        # Unknown cacheability is treated optimistically as cacheable.
        if self._cacheable is None:
            return True
        return self._cacheable

    def changed(self):
        """return True when the tracked file may have changed"""
        if not self.cacheable():
            # Uncacheable paths always count as changed.
            return True

        newstat = filecachesubentry.stat(self.path)

        # The file may have appeared since construction; settle
        # cacheability now that we have stat data for it.
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        return False

    @staticmethod
    def stat(path):
        # A missing file yields None; any other stat failure propagates.
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
1393 1393
class filecacheentry(object):
    """Aggregate several filecachesubentry objects into one cache entry."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(path, stat) for path in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(entry.changed() for entry in self._entries)

    def refresh(self):
        for entry in self._entries:
            entry.refresh()
1410 1410
class filecache(object):
    """A property like decorator that tracks files under .hg/ for updates.

    On first access, the files defined as arguments are stat()ed and the
    results cached. The decorated function is called. The results are stashed
    away in a ``_filecache`` dict on the object whose method is decorated.

    On subsequent access, the cached result is used as it is set to the
    instance dictionary.

    On external property set/delete operations, the caller must update the
    corresponding _filecache entry appropriately. Use __class__.<attr>.set()
    instead of directly setting <attr>.

    When using the property API, the cached data is always used if available.
    No stat() is performed to check if the file has changed.

    Others can muck about with the state of the ``_filecache`` dict. e.g. they
    can populate an entry before the property's getter is called. In this case,
    entries in ``_filecache`` will be used during property operations,
    if available. If the underlying file changes, it is up to external callers
    to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
    method result as well as possibly calling ``del obj._filecache[attr]`` to
    remove the ``filecacheentry``.
    """

    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        # used as a decorator: remember the wrapped getter and both the
        # native-str and bytes forms of its name
        self.func = func
        self.sname = func.__name__
        self.name = pycompat.sysbytes(self.sname)
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self

        # once the value is in obj.__dict__ the instance attribute shadows
        # this non-data descriptor, so we should never get here again
        assert self.sname not in obj.__dict__

        entry = obj._filecache.get(self.name)

        if entry:
            # recompute only when one of the tracked files changed
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.sname] = entry.obj
        return entry.obj

    # don't implement __set__(), which would make __dict__ lookup as slow as
    # function call.

    def set(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.sname] = value # update copy returned by obj.x
1495 1495
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config("extdata", source)
    if not spec:
        raise error.Abort(_("unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith("shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(procutil.tonativestr(cmd),
                                    shell=True, bufsize=-1,
                                    close_fds=procutil.closefds,
                                    stdout=subprocess.PIPE,
                                    cwd=procutil.tonativestr(repo.root))
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            # split each record into "revspec value"; a record without a
            # space gets an empty value
            if " " in l:
                k, v = l.strip().split(" ", 1)
            else:
                k, v = l.strip(), ""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass # we ignore data for nodes that don't exist locally
    finally:
        # always reap the subprocess and close the stream, even when the
        # parsing loop above raised (e.g. a revset parse error)
        if proc:
            try:
                proc.communicate()
            except ValueError:
                # This happens if we started iterating src and then
                # get a parse error on a line. It should be safe to ignore.
                pass
        if src:
            src.close()
    if proc and proc.returncode != 0:
        raise error.Abort(_("extdata command '%s' failed: %s")
                          % (cmd, procutil.explainexit(proc.returncode)))

    return data
1552 1557
1553 1558 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1554 1559 if lock is None:
1555 1560 raise error.LockInheritanceContractViolation(
1556 1561 'lock can only be inherited while held')
1557 1562 if environ is None:
1558 1563 environ = {}
1559 1564 with lock.inherit() as locker:
1560 1565 environ[envvar] = locker
1561 1566 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1562 1567
def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    wlock = repo.currentwlock()
    return _locksub(repo, wlock, 'HG_WLOCK_LOCKER', cmd, *args, **kwargs)
1571 1576
class progress(object):
    """context-manager helper forwarding progress updates to a bar callback

    'updatebar' is called as updatebar(topic, pos, item, unit, total) on
    every update, and once more with pos=None on completion.
    """
    def __init__(self, ui, updatebar, topic, unit="", total=None):
        self.ui = ui
        self.pos = 0
        self.topic = topic
        self.unit = unit
        self.total = total
        self.debug = ui.configbool('progress', 'debug')
        self._updatebar = updatebar

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        # always close out the bar, even when the managed block raised
        self.complete()

    def update(self, pos, item="", total=None):
        """record the current position, optionally adjusting the total"""
        assert pos is not None
        if total:
            self.total = total
        self.pos = pos
        self._updatebar(self.topic, self.pos, item, self.unit, self.total)
        if self.debug:
            self._printdebug(item)

    def increment(self, step=1, item="", total=None):
        """advance the position by 'step' (default 1)"""
        self.update(self.pos + step, item, total)

    def complete(self):
        """signal completion by emitting a final pos=None update"""
        self.pos = None
        self.unit = ""
        self.total = None
        self._updatebar(self.topic, self.pos, "", self.unit, self.total)

    def _printdebug(self, item):
        # initialize 'unit' unconditionally: previously an empty self.unit
        # left it unbound, raising UnboundLocalError in the formats below
        # when progress.debug was enabled
        unit = ''
        if self.unit:
            unit = ' ' + self.unit
        if item:
            item = ' ' + item

        if self.total:
            pct = 100.0 * self.pos / self.total
            self.ui.debug('%s:%s %d/%d%s (%4.2f%%)\n'
                          % (self.topic, item, self.pos, self.total, unit, pct))
        else:
            self.ui.debug('%s:%s %d%s\n' % (self.topic, item, self.pos, unit))
1618 1623
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    if ui.configbool('format', 'generaldelta'):
        return True
    return ui.configbool('format', 'usegeneraldelta')
1625 1630
def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    enabled = ui.configbool('format', 'generaldelta')
    return enabled
1631 1636
class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""
    firstlinekey = '__firstline'

    def __init__(self, vfs, path, keys=None):
        # 'keys' is accepted but unused; kept for caller compatibility
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of file should
        be treated as a key-value pair or returned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                e = _("empty simplekeyvalue file")
                raise error.CorruptedState(e)
            # we don't want to include '\n' in the __firstline
            d[self.firstlinekey] = lines[0][:-1]
            del lines[0]

        try:
            # the 'if line.strip()' part prevents us from failing on empty
            # lines which only contain '\n' therefore are not skipped
            # by 'if line'
            updatedict = dict(line[:-1].split('=', 1) for line in lines
                              if line.strip())
            if self.firstlinekey in updatedict:
                e = _("%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            d.update(updatedict)
        except ValueError as e:
            # raised by dict() when a line has no '=' separator
            raise error.CorruptedState(str(e))
        return d

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file
        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to file before
        everything else, as it is, not in a key=value form"""
        lines = []
        if firstline is not None:
            lines.append('%s\n' % firstline)

        for k, v in data.items():
            if k == self.firstlinekey:
                e = "key name '%s' is reserved" % self.firstlinekey
                raise error.ProgrammingError(e)
            if not k[0:1].isalpha():
                e = "keys must start with a letter in a key-value file"
                raise error.ProgrammingError(e)
            if not k.isalnum():
                e = "invalid key name in a simple key-value file"
                raise error.ProgrammingError(e)
            if '\n' in v:
                e = "invalid value in a simple key-value file"
                raise error.ProgrammingError(e)
            lines.append("%s=%s\n" % (k, v))
        # atomictemp makes the write all-or-nothing
        with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
            fp.write(''.join(lines))
1700 1705
# transaction name prefixes for which registersummarycallback reports
# the number of obsoleted changesets
_reportobsoletedsource = [
    'debugobsolete',
    'pull',
    'push',
    'serve',
    'unbundle',
]

# transaction name prefixes for which registersummarycallback reports
# incoming changesets
_reportnewcssource = [
    'pull',
    'unbundle',
]
1713 1718
def prefetchfiles(repo, revs, match):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally, before the command
    uses them."""
    if not match:
        match = matchall(repo)
    else:
        # The command itself will complain about files that don't exist, so
        # don't duplicate the message.
        match = matchmod.badmatch(match, lambda fn, msg: None)

    fileprefetchhooks(repo, revs, match)
1726 1731
# a list of (repo, revs, match) prefetch functions; extensions append
# callables here and prefetchfiles() invokes them all
fileprefetchhooks = util.hooks()

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True
1732 1737
def registersummarycallback(repo, otr, txnname=''):
    """register a callback to issue a summary after the transaction is closed

    Depending on *txnname*, hooks reporting obsoleted changesets, new
    instabilities, new changeset ranges and phase changes are attached to
    the transaction *otr* as post-close callbacks.
    """
    def txmatch(sources):
        # does the transaction name start with any of the given prefixes?
        return any(txnname.startswith(source) for source in sources)

    # category names already registered on the transaction; also used to
    # generate ordered, unique category names for addpostclose()
    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used leading to troubles. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())
        def wrapped(tr):
            repo = reporef()
            if filtername:
                repo = repo.filtered(filtername)
            func(repo, tr)
        # zero-padded index keeps callbacks running in registration order
        newcat = '%02i-txnreport' % len(categories)
        otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    if txmatch(_reportobsoletedsource):
        @reportsummary
        def reportobsoleted(repo, tr):
            # count markers created by this transaction
            obsoleted = obsutil.getobsoleted(repo, tr)
            if obsoleted:
                repo.ui.status(_('obsoleted %i changesets\n')
                               % len(obsoleted))

    if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
        repo.ui.configbool('experimental', 'evolution.report-instabilities')):
        # (user-facing label, internal revset name) pairs
        instabilitytypes = [
            ('orphan', 'orphan'),
            ('phase-divergent', 'phasedivergent'),
            ('content-divergent', 'contentdivergent'),
        ]

        def getinstabilitycounts(repo):
            # visible (non-filtered) unstable changesets per instability type
            filtered = repo.changelog.filteredrevs
            counts = {}
            for instability, revset in instabilitytypes:
                counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
                                          filtered)
            return counts

        # snapshot counts now so the callback can report the delta later
        oldinstabilitycounts = getinstabilitycounts(repo)
        @reportsummary
        def reportnewinstabilities(repo, tr):
            newinstabilitycounts = getinstabilitycounts(repo)
            for instability, revset in instabilitytypes:
                delta = (newinstabilitycounts[instability] -
                         oldinstabilitycounts[instability])
                msg = getinstabilitymessage(delta, instability)
                if msg:
                    repo.ui.warn(msg)

    if txmatch(_reportnewcssource):
        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get('origrepolen', len(repo))
            unfi = repo.unfiltered()
            if origrepolen >= len(unfi):
                # nothing was added by this transaction
                return

            # Compute the bounds of new visible revisions' range.
            revs = smartset.spanset(repo, start=origrepolen)
            if revs:
                minrev, maxrev = repo[revs.min()], repo[revs.max()]

                if minrev == maxrev:
                    revrange = minrev
                else:
                    revrange = '%s:%s' % (minrev, maxrev)
                draft = len(repo.revs('%ld and draft()', revs))
                secret = len(repo.revs('%ld and secret()', revs))
                if not (draft or secret):
                    msg = _('new changesets %s\n') % revrange
                elif draft and secret:
                    msg = _('new changesets %s (%d drafts, %d secrets)\n')
                    msg %= (revrange, draft, secret)
                elif draft:
                    msg = _('new changesets %s (%d drafts)\n')
                    msg %= (revrange, draft)
                elif secret:
                    msg = _('new changesets %s (%d secrets)\n')
                    msg %= (revrange, secret)
                else:
                    errormsg = 'entered unreachable condition'
                    raise error.ProgrammingError(errormsg)
                repo.ui.status(msg)

            # search new changesets directly pulled as obsolete
            duplicates = tr.changes.get('revduplicates', ())
            obsadded = unfi.revs('(%d: + %ld) and obsolete()',
                                 origrepolen, duplicates)
            cl = repo.changelog
            extinctadded = [r for r in obsadded if r not in cl]
            if extinctadded:
                # They are not just obsolete, but obsolete and invisible
                # we call them "extinct" internally but the terms have not been
                # exposed to users.
                msg = '(%d other changesets obsolete on arrival)\n'
                repo.ui.status(msg % len(extinctadded))

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            pull/unbundle.
            """
            origrepolen = tr.changes.get('origrepolen', len(repo))
            phasetracking = tr.changes.get('phases', {})
            if not phasetracking:
                return
            # pre-existing changesets that became public in this transaction
            published = [
                rev for rev, (old, new) in phasetracking.iteritems()
                if new == phases.public and rev < origrepolen
            ]
            if not published:
                return
            repo.ui.status(_('%d local changesets published\n')
                           % len(published))
1861 1866
def getinstabilitymessage(delta, instability):
    """Return the warning message for newly appeared instabilities.

    Kept as a standalone function so that extensions can wrap it to show
    more information, e.g. hints on how to fix the instabilities.  Returns
    None when *delta* is not positive.
    """
    if delta <= 0:
        return None
    return _('%i new %s changesets\n') % (delta, instability)
1869 1874
def nodesummaries(repo, nodes, maxnumnodes=4):
    """Return a short textual summary of *nodes*.

    All nodes are listed when there are at most *maxnumnodes* of them or
    when the ui is verbose; otherwise only the first *maxnumnodes* are
    shown, followed by a count of the remainder.
    """
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return ' '.join(short(h) for h in nodes)
    shown = [short(h) for h in nodes[:maxnumnodes]]
    return _("%s and %d others") % (' '.join(shown), len(nodes) - maxnumnodes)
1875 1880
def enforcesinglehead(repo, tr, desc):
    """Abort the transaction if any named branch ends up with several heads."""
    if desc in ('strip', 'repair'):
        # strip/repair legitimately pass through multi-head states
        return
    visible = repo.filtered('visible')
    # possible improvement: we could restrict the check to affected branch
    for name, heads in visible.branchmap().iteritems():
        if len(heads) <= 1:
            continue
        msg = _('rejecting multiple heads on branch "%s"') % name
        hint = _('%d heads: %s') % (len(heads), nodesummaries(repo, heads))
        raise error.Abort(msg, hint=hint)
1890 1895
def wrapconvertsink(sink):
    """Hook point for extensions to wrap convcmd.convertsink()'s result.

    The default implementation is the identity.  Extensions wrap this
    function to customize the sink before it is used, whether or not the
    convert extension was formally loaded.
    """
    return sink
1896 1901
def unhidehashlikerevs(repo, specs, hiddentype):
    """Return a repo object in which hash-like revisions are unhidden.

    Parses the user-provided revset *specs* and pins every hidden changeset
    whose hash or revision number appears in them.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets
    """
    if not repo.filtername or not repo.ui.configbool('experimental',
                                                     'directaccess'):
        return repo

    if repo.filtername not in ('visible', 'visible-hidden'):
        return repo

    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError:
            # parse errors will be reported by scmutil.revrange()
            continue
        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)
    if not revs:
        return repo

    if hiddentype == 'warn':
        unfi = repo.unfiltered()
        shown = ", ".join([pycompat.bytestr(unfi[r]) for r in revs])
        repo.ui.warn(_("warning: accessing hidden changesets for write "
                       "operation: %s\n") % shown)

    # A dedicated filter name keeps branch/tags caches separate until those
    # caches can be disabled when revisions are dynamically pinned.
    return repo.filtered('visible-hidden', revs)
1939 1944
def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and returns a set of revision numbers of hidden
    changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    # whether plain revision numbers may be used to address hidden changesets
    allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
    for s in symbols:
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    # numeric access disabled: skip without trying the
                    # hash-prefix interpretation below
                    continue
                else:
                    # hidden if absent from the filtered changelog
                    if n not in cl:
                        revs.add(n)
                    continue
        except ValueError:
            # not an integer: fall through to hash-prefix resolution
            pass

        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            # unknown or ambiguous prefix, or the 'ff...' working dir id
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs
1973 1978
def bookmarkrevs(repo, mark):
    """Select revisions reachable by a given bookmark.

    Excludes ancestors of non-bookmarked heads and of other bookmarks.
    """
    expr = ("ancestors(bookmark(%s)) - "
            "ancestors(head() and not bookmark(%s)) - "
            "ancestors(bookmark() and not bookmark(%s))")
    return repo.revs(expr, mark, mark, mark)
@@ -1,115 +1,120 b''
1 1 $ hg init repo
2 2 $ cd repo
3 3 $ for n in 0 1 2 3 4 5 6 7 8 9 10 11; do
4 4 > echo $n > $n
5 5 > hg ci -qAm $n
6 6 > done
7 7
8 8 test revset support
9 9
10 10 $ cat <<'EOF' >> .hg/hgrc
11 11 > [extdata]
12 12 > filedata = file:extdata.txt
13 13 > notes = notes.txt
14 14 > shelldata = shell:cat extdata.txt | grep 2
15 15 > emptygrep = shell:cat extdata.txt | grep empty
16 16 > badparse = shell:cat badparse.txt
17 17 > EOF
18 18 $ cat <<'EOF' > extdata.txt
19 19 > 2 another comment on 2
20 20 > 3
21 21 > EOF
22 22 $ cat <<'EOF' > notes.txt
23 23 > f6ed this change is great!
24 24 > e834 this is buggy :(
25 25 > 0625 first post
26 26 > bogusnode gives no error
27 27 > a ambiguous node gives no error
28 28 > EOF
29 29
30 30 $ hg log -qr "extdata(filedata)"
31 31 2:f6ed99a58333
32 32 3:9de260b1e88e
33 33 $ hg log -qr "extdata(shelldata)"
34 34 2:f6ed99a58333
35 35
36 36 test weight of extdata() revset
37 37
38 38 $ hg debugrevspec -p optimized "extdata(filedata) & 3"
39 39 * optimized:
40 40 (andsmally
41 41 (func
42 42 (symbol 'extdata')
43 43 (symbol 'filedata'))
44 44 (symbol '3'))
45 45 3
46 46
47 47 test non-zero exit of shell command
48 48
49 49 $ hg log -qr "extdata(emptygrep)"
50 50 abort: extdata command 'cat extdata.txt | grep empty' failed: exited with status 1
51 51 [255]
52 52
53 53 test bad extdata() revset source
54 54
55 55 $ hg log -qr "extdata()"
56 56 hg: parse error: extdata takes at least 1 string argument
57 57 [255]
58 58 $ hg log -qr "extdata(unknown)"
59 59 abort: unknown extdata source 'unknown'
60 60 [255]
61 61
62 62 test a zero-exiting source that emits garbage to confuse the revset parser
63 63
64 64 $ cat > badparse.txt <<'EOF'
65 65 > +---------------------------------------+
66 66 > 9de260b1e88e
67 67 > EOF
68 68
69 BUG: this should print the revset parse error
70 $ hg log -qr "extdata(badparse)" 2>&1 | grep ValueError
71 ValueError: Mixing iteration and read methods would lose data
69 It might be nice if this error message mentioned where the bad string
70 came from (e.g. line X of extdata source S), but the important thing is
71 that we don't crash before we can print the parse error.
72 $ hg log -qr "extdata(badparse)"
73 hg: parse error at 0: not a prefix: +
74 (+---------------------------------------+
75 ^ here)
76 [255]
72 77
73 78 test template support:
74 79
75 80 $ hg log -r:3 -T "{node|short}{if(extdata('notes'), ' # {extdata('notes')}')}\n"
76 81 06254b906311 # first post
77 82 e8342c9a2ed1 # this is buggy :(
78 83 f6ed99a58333 # this change is great!
79 84 9de260b1e88e
80 85
81 86 test template cache:
82 87
83 88 $ hg log -r:3 -T '{rev} "{extdata("notes")}" "{extdata("shelldata")}"\n'
84 89 0 "first post" ""
85 90 1 "this is buggy :(" ""
86 91 2 "this change is great!" "another comment on 2"
87 92 3 "" ""
88 93
89 94 test bad extdata() template source
90 95
91 96 $ hg log -T "{extdata()}\n"
92 97 hg: parse error: extdata expects one argument
93 98 [255]
94 99 $ hg log -T "{extdata('unknown')}\n"
95 100 abort: unknown extdata source 'unknown'
96 101 [255]
97 102 $ hg log -T "{extdata(unknown)}\n"
98 103 hg: parse error: empty data source specified
99 104 (did you mean extdata('unknown')?)
100 105 [255]
101 106 $ hg log -T "{extdata('{unknown}')}\n"
102 107 hg: parse error: empty data source specified
103 108 [255]
104 109
105 110 we don't fix up relative file URLs, but we do run shell commands in repo root
106 111
107 112 $ mkdir sub
108 113 $ cd sub
109 114 $ hg log -qr "extdata(filedata)"
110 115 abort: error: $ENOENT$
111 116 [255]
112 117 $ hg log -qr "extdata(shelldata)"
113 118 2:f6ed99a58333
114 119
115 120 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now