scmutil: display the optional hint when handling StorageError in callcatch()...
Matt Harbison
r40694:4ec8bee1 default
@@ -1,1800 +1,1802
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import hashlib
13 13 import os
14 14 import re
15 15 import socket
16 16 import subprocess
17 17 import weakref
18 18
19 19 from .i18n import _
20 20 from .node import (
21 21 bin,
22 22 hex,
23 23 nullid,
24 24 nullrev,
25 25 short,
26 26 wdirid,
27 27 wdirrev,
28 28 )
29 29
30 30 from . import (
31 31 encoding,
32 32 error,
33 33 match as matchmod,
34 34 obsolete,
35 35 obsutil,
36 36 pathutil,
37 37 phases,
38 38 policy,
39 39 pycompat,
40 40 revsetlang,
41 41 similar,
42 42 smartset,
43 43 url,
44 44 util,
45 45 vfs,
46 46 )
47 47
48 48 from .utils import (
49 49 procutil,
50 50 stringutil,
51 51 )
52 52
53 53 if pycompat.iswindows:
54 54 from . import scmwindows as scmplatform
55 55 else:
56 56 from . import scmposix as scmplatform
57 57
58 58 parsers = policy.importmod(r'parsers')
59 59
60 60 termsize = scmplatform.termsize
61 61
62 62 class status(tuple):
63 63 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
64 64 and 'ignored' properties are only relevant to the working copy.
65 65 '''
66 66
67 67 __slots__ = ()
68 68
69 69 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
70 70 clean):
71 71 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
72 72 ignored, clean))
73 73
74 74 @property
75 75 def modified(self):
76 76 '''files that have been modified'''
77 77 return self[0]
78 78
79 79 @property
80 80 def added(self):
81 81 '''files that have been added'''
82 82 return self[1]
83 83
84 84 @property
85 85 def removed(self):
86 86 '''files that have been removed'''
87 87 return self[2]
88 88
89 89 @property
90 90 def deleted(self):
91 91 '''files that are in the dirstate, but have been deleted from the
92 92 working copy (aka "missing")
93 93 '''
94 94 return self[3]
95 95
96 96 @property
97 97 def unknown(self):
98 98 '''files not in the dirstate that are not ignored'''
99 99 return self[4]
100 100
101 101 @property
102 102 def ignored(self):
103 103 '''files not in the dirstate that are ignored (by _dirignore())'''
104 104 return self[5]
105 105
106 106 @property
107 107 def clean(self):
108 108 '''files that have not been modified'''
109 109 return self[6]
110 110
111 111 def __repr__(self, *args, **kwargs):
112 112 return ((r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
113 113 r'unknown=%s, ignored=%s, clean=%s>') %
114 114 tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self))
115 115
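A minimal usage sketch of the status tuple (hypothetical driver code; assumes a
Mercurial checkout on PYTHONPATH), since the seven positional slots are easy to
misorder:

    from mercurial import scmutil

    st = scmutil.status(modified=[b'a.txt'], added=[b'b.txt'], removed=[],
                        deleted=[], unknown=[], ignored=[], clean=[b'c.txt'])
    assert st.modified == st[0] == [b'a.txt']  # named/positional access agree
    assert st.clean == st[6] == [b'c.txt']     # per the properties above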
116 116 def itersubrepos(ctx1, ctx2):
117 117 """find subrepos in ctx1 or ctx2"""
118 118 # Create a (subpath, ctx) mapping where we prefer subpaths from
119 119 # ctx1. The subpaths from ctx2 are important when the .hgsub file
120 120 # has been modified (in ctx2) but not yet committed (in ctx1).
121 121 subpaths = dict.fromkeys(ctx2.substate, ctx2)
122 122 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
123 123
124 124 missing = set()
125 125
126 126 for subpath in ctx2.substate:
127 127 if subpath not in ctx1.substate:
128 128 del subpaths[subpath]
129 129 missing.add(subpath)
130 130
131 131 for subpath, ctx in sorted(subpaths.iteritems()):
132 132 yield subpath, ctx.sub(subpath)
133 133
134 134 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
135 135 # status and diff will have an accurate result when it does
136 136 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
137 137 # against itself.
138 138 for subpath in missing:
139 139 yield subpath, ctx2.nullsub(subpath, ctx1)
140 140
141 141 def nochangesfound(ui, repo, excluded=None):
142 142 '''Report no changes for push/pull; excluded is None or a list of
143 143 nodes excluded from the push/pull.
144 144 '''
145 145 secretlist = []
146 146 if excluded:
147 147 for n in excluded:
148 148 ctx = repo[n]
149 149 if ctx.phase() >= phases.secret and not ctx.extinct():
150 150 secretlist.append(n)
151 151
152 152 if secretlist:
153 153 ui.status(_("no changes found (ignored %d secret changesets)\n")
154 154 % len(secretlist))
155 155 else:
156 156 ui.status(_("no changes found\n"))
157 157
158 158 def callcatch(ui, func):
159 159 """call func() with global exception handling
160 160
161 161 return func() if no exception happens. otherwise do some error handling
162 162 and return an exit code accordingly. does not handle all exceptions.
163 163 """
164 164 try:
165 165 try:
166 166 return func()
167 167 except: # re-raises
168 168 ui.traceback()
169 169 raise
170 170 # Global exception handling, alphabetically
171 171 # Mercurial-specific first, followed by built-in and library exceptions
172 172 except error.LockHeld as inst:
173 173 if inst.errno == errno.ETIMEDOUT:
174 174 reason = _('timed out waiting for lock held by %r') % (
175 175 pycompat.bytestr(inst.locker))
176 176 else:
177 177 reason = _('lock held by %r') % inst.locker
178 178 ui.error(_("abort: %s: %s\n") % (
179 179 inst.desc or stringutil.forcebytestr(inst.filename), reason))
180 180 if not inst.locker:
181 181 ui.error(_("(lock might be very busy)\n"))
182 182 except error.LockUnavailable as inst:
183 183 ui.error(_("abort: could not lock %s: %s\n") %
184 184 (inst.desc or stringutil.forcebytestr(inst.filename),
185 185 encoding.strtolocal(inst.strerror)))
186 186 except error.OutOfBandError as inst:
187 187 if inst.args:
188 188 msg = _("abort: remote error:\n")
189 189 else:
190 190 msg = _("abort: remote error\n")
191 191 ui.error(msg)
192 192 if inst.args:
193 193 ui.error(''.join(inst.args))
194 194 if inst.hint:
195 195 ui.error('(%s)\n' % inst.hint)
196 196 except error.RepoError as inst:
197 197 ui.error(_("abort: %s!\n") % inst)
198 198 if inst.hint:
199 199 ui.error(_("(%s)\n") % inst.hint)
200 200 except error.ResponseError as inst:
201 201 ui.error(_("abort: %s") % inst.args[0])
202 202 msg = inst.args[1]
203 203 if isinstance(msg, type(u'')):
204 204 msg = pycompat.sysbytes(msg)
205 205 if not isinstance(msg, bytes):
206 206 ui.error(" %r\n" % (msg,))
207 207 elif not msg:
208 208 ui.error(_(" empty string\n"))
209 209 else:
210 210 ui.error("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
211 211 except error.CensoredNodeError as inst:
212 212 ui.error(_("abort: file censored %s!\n") % inst)
213 213 except error.StorageError as inst:
214 214 ui.error(_("abort: %s!\n") % inst)
215 if inst.hint:
216 ui.error(_("(%s)\n") % inst.hint)
215 217 except error.InterventionRequired as inst:
216 218 ui.error("%s\n" % inst)
217 219 if inst.hint:
218 220 ui.error(_("(%s)\n") % inst.hint)
219 221 return 1
220 222 except error.WdirUnsupported:
221 223 ui.error(_("abort: working directory revision cannot be specified\n"))
222 224 except error.Abort as inst:
223 225 ui.error(_("abort: %s\n") % inst)
224 226 if inst.hint:
225 227 ui.error(_("(%s)\n") % inst.hint)
226 228 except ImportError as inst:
227 229 ui.error(_("abort: %s!\n") % stringutil.forcebytestr(inst))
228 230 m = stringutil.forcebytestr(inst).split()[-1]
229 231 if m in "mpatch bdiff".split():
230 232 ui.error(_("(did you forget to compile extensions?)\n"))
231 233 elif m in "zlib".split():
232 234 ui.error(_("(is your Python install correct?)\n"))
233 235 except IOError as inst:
234 236 if util.safehasattr(inst, "code"):
235 237 ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst))
236 238 elif util.safehasattr(inst, "reason"):
237 239 try: # usually it is in the form (errno, strerror)
238 240 reason = inst.reason.args[1]
239 241 except (AttributeError, IndexError):
240 242 # it might be anything, for example a string
241 243 reason = inst.reason
242 244 if isinstance(reason, pycompat.unicode):
243 245 # SSLError of Python 2.7.9 contains a unicode
244 246 reason = encoding.unitolocal(reason)
245 247 ui.error(_("abort: error: %s\n") % reason)
246 248 elif (util.safehasattr(inst, "args")
247 249 and inst.args and inst.args[0] == errno.EPIPE):
248 250 pass
249 251 elif getattr(inst, "strerror", None):
250 252 if getattr(inst, "filename", None):
251 253 ui.error(_("abort: %s: %s\n") % (
252 254 encoding.strtolocal(inst.strerror),
253 255 stringutil.forcebytestr(inst.filename)))
254 256 else:
255 257 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
256 258 else:
257 259 raise
258 260 except OSError as inst:
259 261 if getattr(inst, "filename", None) is not None:
260 262 ui.error(_("abort: %s: '%s'\n") % (
261 263 encoding.strtolocal(inst.strerror),
262 264 stringutil.forcebytestr(inst.filename)))
263 265 else:
264 266 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
265 267 except MemoryError:
266 268 ui.error(_("abort: out of memory\n"))
267 269 except SystemExit as inst:
268 270 # Commands shouldn't sys.exit directly, but give a return code.
269 271 # Just in case catch this and pass exit code to caller.
270 272 return inst.code
271 273 except socket.error as inst:
272 274 ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))
273 275
274 276 return -1
275 277
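The two added lines above are the point of this changeset: StorageError now
reports its optional hint the same way RepoError and Abort already do. A
minimal sketch of the resulting behavior (hypothetical driver code; the
message and hint are invented):

    from mercurial import error, scmutil, ui as uimod

    def fail():
        raise error.StorageError(b'revlog data is corrupt',
                                 hint=b'run "hg verify" to check the repo')

    u = uimod.ui.load()
    ret = scmutil.callcatch(u, fail)
    # prints: abort: revlog data is corrupt!
    #         (run "hg verify" to check the repo)
    assert ret == -1   # callcatch's generic error return code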
276 278 def checknewlabel(repo, lbl, kind):
277 279 # Do not use the "kind" parameter in ui output.
278 280 # It makes strings difficult to translate.
279 281 if lbl in ['tip', '.', 'null']:
280 282 raise error.Abort(_("the name '%s' is reserved") % lbl)
281 283 for c in (':', '\0', '\n', '\r'):
282 284 if c in lbl:
283 285 raise error.Abort(
284 286 _("%r cannot be used in a name") % pycompat.bytestr(c))
285 287 try:
286 288 int(lbl)
287 289 raise error.Abort(_("cannot use an integer as a name"))
288 290 except ValueError:
289 291 pass
290 292 if lbl.strip() != lbl:
291 293 raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
292 294
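A hedged sketch of inputs the checks above reject (assumes `repo` is an open
repository object; the kind argument is deliberately unused in messages):

    from mercurial import error, scmutil

    for bad in (b'tip', b'123', b'a:b', b' padded '):
        try:
            scmutil.checknewlabel(repo, bad, b'bookmark')
        except error.Abort:
            pass   # reserved name, integer, ':', and whitespace, respectively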
293 295 def checkfilename(f):
294 296 '''Check that the filename f is an acceptable filename for a tracked file'''
295 297 if '\r' in f or '\n' in f:
296 298 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
297 299 % pycompat.bytestr(f))
298 300
299 301 def checkportable(ui, f):
300 302 '''Check if filename f is portable and warn or abort depending on config'''
301 303 checkfilename(f)
302 304 abort, warn = checkportabilityalert(ui)
303 305 if abort or warn:
304 306 msg = util.checkwinfilename(f)
305 307 if msg:
306 308 msg = "%s: %s" % (msg, procutil.shellquote(f))
307 309 if abort:
308 310 raise error.Abort(msg)
309 311 ui.warn(_("warning: %s\n") % msg)
310 312
311 313 def checkportabilityalert(ui):
312 314 '''check if the user's config requests nothing, a warning, or abort for
313 315 non-portable filenames'''
314 316 val = ui.config('ui', 'portablefilenames')
315 317 lval = val.lower()
316 318 bval = stringutil.parsebool(val)
317 319 abort = pycompat.iswindows or lval == 'abort'
318 320 warn = bval or lval == 'warn'
319 321 if bval is None and not (warn or abort or lval == 'ignore'):
320 322 raise error.ConfigError(
321 323 _("ui.portablefilenames value is invalid ('%s')") % val)
322 324 return abort, warn
323 325
324 326 class casecollisionauditor(object):
325 327 def __init__(self, ui, abort, dirstate):
326 328 self._ui = ui
327 329 self._abort = abort
328 330 allfiles = '\0'.join(dirstate._map)
329 331 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
330 332 self._dirstate = dirstate
331 333 # The purpose of _newfiles is so that we don't complain about
332 334 # case collisions if someone were to call this object with the
333 335 # same filename twice.
334 336 self._newfiles = set()
335 337
336 338 def __call__(self, f):
337 339 if f in self._newfiles:
338 340 return
339 341 fl = encoding.lower(f)
340 342 if fl in self._loweredfiles and f not in self._dirstate:
341 343 msg = _('possible case-folding collision for %s') % f
342 344 if self._abort:
343 345 raise error.Abort(msg)
344 346 self._ui.warn(_("warning: %s\n") % msg)
345 347 self._loweredfiles.add(fl)
346 348 self._newfiles.add(f)
347 349
348 350 def filteredhash(repo, maxrev):
349 351 """build hash of filtered revisions in the current repoview.
350 352
351 353 Multiple caches perform up-to-date validation by checking that the
352 354 tiprev and tipnode stored in the cache file match the current repository.
353 355 However, this is not sufficient for validating repoviews because the set
354 356 of revisions in the view may change without the repository tiprev and
355 357 tipnode changing.
356 358
357 359 This function hashes all the revs filtered from the view and returns
358 360 that SHA-1 digest.
359 361 """
360 362 cl = repo.changelog
361 363 if not cl.filteredrevs:
362 364 return None
363 365 key = None
364 366 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
365 367 if revs:
366 368 s = hashlib.sha1()
367 369 for rev in revs:
368 370 s.update('%d;' % rev)
369 371 key = s.digest()
370 372 return key
371 373
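The cache key is simply a SHA-1 over the filtered revs in ascending order, each
rendered in decimal with a ';' terminator. A standalone sketch reproducing the
same digest without a repo:

    import hashlib

    def filteredkey(filteredrevs, maxrev):
        revs = sorted(r for r in filteredrevs if r <= maxrev)
        if not revs:
            return None                # mirrors the early-out above
        s = hashlib.sha1()
        for rev in revs:
            s.update(b'%d;' % rev)     # decimal rev plus ';' separator
        return s.digest()

    assert filteredkey({9, 2, 5}, 8) == hashlib.sha1(b'2;5;').digest()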
372 374 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
373 375 '''yield every hg repository under path, always recursively.
374 376 The recurse flag will only control recursion into repo working dirs'''
375 377 def errhandler(err):
376 378 if err.filename == path:
377 379 raise err
378 380 samestat = getattr(os.path, 'samestat', None)
379 381 if followsym and samestat is not None:
380 382 def adddir(dirlst, dirname):
381 383 dirstat = os.stat(dirname)
382 384 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
383 385 if not match:
384 386 dirlst.append(dirstat)
385 387 return not match
386 388 else:
387 389 followsym = False
388 390
389 391 if (seen_dirs is None) and followsym:
390 392 seen_dirs = []
391 393 adddir(seen_dirs, path)
392 394 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
393 395 dirs.sort()
394 396 if '.hg' in dirs:
395 397 yield root # found a repository
396 398 qroot = os.path.join(root, '.hg', 'patches')
397 399 if os.path.isdir(os.path.join(qroot, '.hg')):
398 400 yield qroot # we have a patch queue repo here
399 401 if recurse:
400 402 # avoid recursing inside the .hg directory
401 403 dirs.remove('.hg')
402 404 else:
403 405 dirs[:] = [] # don't descend further
404 406 elif followsym:
405 407 newdirs = []
406 408 for d in dirs:
407 409 fname = os.path.join(root, d)
408 410 if adddir(seen_dirs, fname):
409 411 if os.path.islink(fname):
410 412 for hgname in walkrepos(fname, True, seen_dirs):
411 413 yield hgname
412 414 else:
413 415 newdirs.append(d)
414 416 dirs[:] = newdirs
415 417
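A hedged usage sketch (the path is invented; followsym only takes effect where
os.path.samestat exists, i.e. on POSIX):

    from mercurial import scmutil

    # Yields every repo under the tree, including patch-queue repos found in
    # .hg/patches; recurse=True additionally descends into working copies.
    for repopath in scmutil.walkrepos(b'/srv/repos', followsym=True):
        print(repopath)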
416 418 def binnode(ctx):
417 419 """Return binary node id for a given basectx"""
418 420 node = ctx.node()
419 421 if node is None:
420 422 return wdirid
421 423 return node
422 424
423 425 def intrev(ctx):
424 426 """Return integer for a given basectx that can be used in comparison or
425 427 arithmetic operation"""
426 428 rev = ctx.rev()
427 429 if rev is None:
428 430 return wdirrev
429 431 return rev
430 432
431 433 def formatchangeid(ctx):
432 434 """Format changectx as '{rev}:{node|formatnode}', which is the default
433 435 template provided by logcmdutil.changesettemplater"""
434 436 repo = ctx.repo()
435 437 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
436 438
437 439 def formatrevnode(ui, rev, node):
438 440 """Format given revision and node depending on the current verbosity"""
439 441 if ui.debugflag:
440 442 hexfunc = hex
441 443 else:
442 444 hexfunc = short
443 445 return '%d:%s' % (rev, hexfunc(node))
444 446
445 447 def resolvehexnodeidprefix(repo, prefix):
446 448 if (prefix.startswith('x') and
447 449 repo.ui.configbool('experimental', 'revisions.prefixhexnode')):
448 450 prefix = prefix[1:]
449 451 try:
450 452 # Uses unfiltered repo because it's faster when prefix is ambiguous.
451 453 # This matches the shortesthexnodeidprefix() function below.
452 454 node = repo.unfiltered().changelog._partialmatch(prefix)
453 455 except error.AmbiguousPrefixLookupError:
454 456 revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
455 457 if revset:
456 458 # Clear config to avoid infinite recursion
457 459 configoverrides = {('experimental',
458 460 'revisions.disambiguatewithin'): None}
459 461 with repo.ui.configoverride(configoverrides):
460 462 revs = repo.anyrevs([revset], user=True)
461 463 matches = []
462 464 for rev in revs:
463 465 node = repo.changelog.node(rev)
464 466 if hex(node).startswith(prefix):
465 467 matches.append(node)
466 468 if len(matches) == 1:
467 469 return matches[0]
468 470 raise
469 471 if node is None:
470 472 return
471 473 repo.changelog.rev(node) # make sure node isn't filtered
472 474 return node
473 475
474 476 def mayberevnum(repo, prefix):
475 477 """Checks if the given prefix may be mistaken for a revision number"""
476 478 try:
477 479 i = int(prefix)
478 480 # if we are a pure int, then starting with zero will not be
479 481 # confused with a rev; nor, obviously, will an int larger
480 482 # than the value of the tip rev. We still need to disambiguate if
481 483 # prefix == '0', since that *is* a valid revnum.
482 484 if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
483 485 return False
484 486 return True
485 487 except ValueError:
486 488 return False
487 489
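In other words, a prefix can collide with a revision number only when it
parses as decimal, has no leading zero (except the single digit '0'), and does
not exceed the tip rev. The same rule, sketched with len(repo) passed
explicitly:

    def mayberevnum_rule(prefix, repolen):
        try:
            i = int(prefix)
        except ValueError:
            return False
        if prefix != b'0' and prefix[0:1] == b'0':
            return False      # revnums are never shown with a leading zero
        return i < repolen    # ints beyond tip cannot be revnums

    assert mayberevnum_rule(b'0', 10)        # '0' *is* a valid revnum
    assert not mayberevnum_rule(b'07', 10)   # leading zero
    assert not mayberevnum_rule(b'42', 10)   # larger than the tip rev
    assert not mayberevnum_rule(b'abc', 10)  # not a number at all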
488 490 def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
489 491 """Find the shortest unambiguous prefix that matches hexnode.
490 492
491 493 If "cache" is not None, it must be a dictionary that can be used for
492 494 caching between calls to this method.
493 495 """
494 496 # _partialmatch() of filtered changelog could take O(len(repo)) time,
495 497 # which would be unacceptably slow. so we look for hash collision in
496 498 # unfiltered space, which means some hashes may be slightly longer.
497 499
498 500 minlength = max(minlength, 1)
499 501
500 502 def disambiguate(prefix):
501 503 """Disambiguate against revnums."""
502 504 if repo.ui.configbool('experimental', 'revisions.prefixhexnode'):
503 505 if mayberevnum(repo, prefix):
504 506 return 'x' + prefix
505 507 else:
506 508 return prefix
507 509
508 510 hexnode = hex(node)
509 511 for length in range(len(prefix), len(hexnode) + 1):
510 512 prefix = hexnode[:length]
511 513 if not mayberevnum(repo, prefix):
512 514 return prefix
513 515
514 516 cl = repo.unfiltered().changelog
515 517 revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
516 518 if revset:
517 519 revs = None
518 520 if cache is not None:
519 521 revs = cache.get('disambiguationrevset')
520 522 if revs is None:
521 523 revs = repo.anyrevs([revset], user=True)
522 524 if cache is not None:
523 525 cache['disambiguationrevset'] = revs
524 526 if cl.rev(node) in revs:
525 527 hexnode = hex(node)
526 528 nodetree = None
527 529 if cache is not None:
528 530 nodetree = cache.get('disambiguationnodetree')
529 531 if not nodetree:
530 532 try:
531 533 nodetree = parsers.nodetree(cl.index, len(revs))
532 534 except AttributeError:
533 535 # no native nodetree
534 536 pass
535 537 else:
536 538 for r in revs:
537 539 nodetree.insert(r)
538 540 if cache is not None:
539 541 cache['disambiguationnodetree'] = nodetree
540 542 if nodetree is not None:
541 543 length = max(nodetree.shortest(node), minlength)
542 544 prefix = hexnode[:length]
543 545 return disambiguate(prefix)
544 546 for length in range(minlength, len(hexnode) + 1):
545 547 matches = []
546 548 prefix = hexnode[:length]
547 549 for rev in revs:
548 550 otherhexnode = repo[rev].hex()
549 551 if prefix == otherhexnode[:length]:
550 552 matches.append(otherhexnode)
551 553 if len(matches) == 1:
552 554 return disambiguate(prefix)
553 555
554 556 try:
555 557 return disambiguate(cl.shortest(node, minlength))
556 558 except error.LookupError:
557 559 raise error.RepoLookupError()
558 560
559 561 def isrevsymbol(repo, symbol):
560 562 """Checks if a symbol exists in the repo.
561 563
562 564 See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
563 565 symbol is an ambiguous nodeid prefix.
564 566 """
565 567 try:
566 568 revsymbol(repo, symbol)
567 569 return True
568 570 except error.RepoLookupError:
569 571 return False
570 572
571 573 def revsymbol(repo, symbol):
572 574 """Returns a context given a single revision symbol (as string).
573 575
574 576 This is similar to revsingle(), but accepts only a single revision symbol,
575 577 i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
576 578 not "max(public())".
577 579 """
578 580 if not isinstance(symbol, bytes):
579 581 msg = ("symbol (%s of type %s) was not a string, did you mean "
580 582 "repo[symbol]?" % (symbol, type(symbol)))
581 583 raise error.ProgrammingError(msg)
582 584 try:
583 585 if symbol in ('.', 'tip', 'null'):
584 586 return repo[symbol]
585 587
586 588 try:
587 589 r = int(symbol)
588 590 if '%d' % r != symbol:
589 591 raise ValueError
590 592 l = len(repo.changelog)
591 593 if r < 0:
592 594 r += l
593 595 if r < 0 or r >= l and r != wdirrev:
594 596 raise ValueError
595 597 return repo[r]
596 598 except error.FilteredIndexError:
597 599 raise
598 600 except (ValueError, OverflowError, IndexError):
599 601 pass
600 602
601 603 if len(symbol) == 40:
602 604 try:
603 605 node = bin(symbol)
604 606 rev = repo.changelog.rev(node)
605 607 return repo[rev]
606 608 except error.FilteredLookupError:
607 609 raise
608 610 except (TypeError, LookupError):
609 611 pass
610 612
611 613 # look up bookmarks through the name interface
612 614 try:
613 615 node = repo.names.singlenode(repo, symbol)
614 616 rev = repo.changelog.rev(node)
615 617 return repo[rev]
616 618 except KeyError:
617 619 pass
618 620
619 621 node = resolvehexnodeidprefix(repo, symbol)
620 622 if node is not None:
621 623 rev = repo.changelog.rev(node)
622 624 return repo[rev]
623 625
624 626 raise error.RepoLookupError(_("unknown revision '%s'") % symbol)
625 627
626 628 except error.WdirUnsupported:
627 629 return repo[None]
628 630 except (error.FilteredIndexError, error.FilteredLookupError,
629 631 error.FilteredRepoLookupError):
630 632 raise _filterederror(repo, symbol)
631 633
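A hedged usage sketch (assumes the current directory is a Mercurial
repository):

    from mercurial import hg, scmutil, ui as uimod

    repo = hg.repository(uimod.ui.load(), b'.')
    ctx = scmutil.revsymbol(repo, b'tip')  # '.', 'null', hashes, bookmarks...
    repo.ui.write(b'%d:%s\n' % (ctx.rev(), ctx.hex()))
    # Passing a str instead of bytes raises ProgrammingError, per the check
    # at the top of revsymbol().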
632 634 def _filterederror(repo, changeid):
633 635 """build an exception to be raised about a filtered changeid
634 636
635 637 This is extracted in a function to help extensions (eg: evolve) to
636 638 experiment with various message variants."""
637 639 if repo.filtername.startswith('visible'):
638 640
639 641 # Check if the changeset is obsolete
640 642 unfilteredrepo = repo.unfiltered()
641 643 ctx = revsymbol(unfilteredrepo, changeid)
642 644
643 645 # If the changeset is obsolete, enrich the message with the reason
644 646 # that made this changeset not visible
645 647 if ctx.obsolete():
646 648 msg = obsutil._getfilteredreason(repo, changeid, ctx)
647 649 else:
648 650 msg = _("hidden revision '%s'") % changeid
649 651
650 652 hint = _('use --hidden to access hidden revisions')
651 653
652 654 return error.FilteredRepoLookupError(msg, hint=hint)
653 655 msg = _("filtered revision '%s' (not in '%s' subset)")
654 656 msg %= (changeid, repo.filtername)
655 657 return error.FilteredRepoLookupError(msg)
656 658
657 659 def revsingle(repo, revspec, default='.', localalias=None):
658 660 if not revspec and revspec != 0:
659 661 return repo[default]
660 662
661 663 l = revrange(repo, [revspec], localalias=localalias)
662 664 if not l:
663 665 raise error.Abort(_('empty revision set'))
664 666 return repo[l.last()]
665 667
666 668 def _pairspec(revspec):
667 669 tree = revsetlang.parse(revspec)
668 670 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
669 671
670 672 def revpair(repo, revs):
671 673 if not revs:
672 674 return repo['.'], repo[None]
673 675
674 676 l = revrange(repo, revs)
675 677
676 678 if not l:
677 679 first = second = None
678 680 elif l.isascending():
679 681 first = l.min()
680 682 second = l.max()
681 683 elif l.isdescending():
682 684 first = l.max()
683 685 second = l.min()
684 686 else:
685 687 first = l.first()
686 688 second = l.last()
687 689
688 690 if first is None:
689 691 raise error.Abort(_('empty revision range'))
690 692 if (first == second and len(revs) >= 2
691 693 and not all(revrange(repo, [r]) for r in revs)):
692 694 raise error.Abort(_('empty revision on one side of range'))
693 695
694 696 # if top-level is range expression, the result must always be a pair
695 697 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
696 698 return repo[first], repo[None]
697 699
698 700 return repo[first], repo[second]
699 701
700 702 def revrange(repo, specs, localalias=None):
701 703 """Execute 1 to many revsets and return the union.
702 704
703 705 This is the preferred mechanism for executing revsets using user-specified
704 706 config options, such as revset aliases.
705 707
706 708 The revsets specified by ``specs`` will be executed via a chained ``OR``
707 709 expression. If ``specs`` is empty, an empty result is returned.
708 710
709 711 ``specs`` can contain integers, in which case they are assumed to be
710 712 revision numbers.
711 713
712 714 It is assumed the revsets are already formatted. If you have arguments
713 715 that need to be expanded in the revset, call ``revsetlang.formatspec()``
714 716 and pass the result as an element of ``specs``.
715 717
716 718 Specifying a single revset is allowed.
717 719
718 720 Returns a ``revset.abstractsmartset`` which is a list-like interface over
719 721 integer revisions.
720 722 """
721 723 allspecs = []
722 724 for spec in specs:
723 725 if isinstance(spec, int):
724 726 spec = revsetlang.formatspec('rev(%d)', spec)
725 727 allspecs.append(spec)
726 728 return repo.anyrevs(allspecs, user=True, localalias=localalias)
727 729
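A hedged sketch of the calling pattern the docstring prescribes, quoting user
input through revsetlang.formatspec() first (assumes `repo` is an open
repository):

    from mercurial import revsetlang, scmutil

    spec = revsetlang.formatspec(b'ancestors(%s)', b'tip')  # safely quoted
    revs = scmutil.revrange(repo, [spec, 0])  # bare ints are taken as revnums
    for r in revs:                            # smartset of integer revisions
        repo.ui.write(b'%d\n' % r)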
728 730 def meaningfulparents(repo, ctx):
729 731 """Return list of meaningful (or all if debug) parentrevs for rev.
730 732
731 733 For merges (two non-nullrev revisions) both parents are meaningful.
732 734 Otherwise the first parent revision is considered meaningful if it
733 735 is not the preceding revision.
734 736 """
735 737 parents = ctx.parents()
736 738 if len(parents) > 1:
737 739 return parents
738 740 if repo.ui.debugflag:
739 741 return [parents[0], repo[nullrev]]
740 742 if parents[0].rev() >= intrev(ctx) - 1:
741 743 return []
742 744 return parents
743 745
744 746 def expandpats(pats):
745 747 '''Expand bare globs when running on windows.
746 748 On posix we assume it has already been done by sh.'''
747 749 if not util.expandglobs:
748 750 return list(pats)
749 751 ret = []
750 752 for kindpat in pats:
751 753 kind, pat = matchmod._patsplit(kindpat, None)
752 754 if kind is None:
753 755 try:
754 756 globbed = glob.glob(pat)
755 757 except re.error:
756 758 globbed = [pat]
757 759 if globbed:
758 760 ret.extend(globbed)
759 761 continue
760 762 ret.append(kindpat)
761 763 return ret
762 764
763 765 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
764 766 badfn=None):
765 767 '''Return a matcher and the patterns that were used.
766 768 The matcher will warn about bad matches, unless an alternate badfn callback
767 769 is provided.'''
768 770 if pats == ("",):
769 771 pats = []
770 772 if opts is None:
771 773 opts = {}
772 774 if not globbed and default == 'relpath':
773 775 pats = expandpats(pats or [])
774 776
775 777 def bad(f, msg):
776 778 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
777 779
778 780 if badfn is None:
779 781 badfn = bad
780 782
781 783 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
782 784 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
783 785
784 786 if m.always():
785 787 pats = []
786 788 return m, pats
787 789
788 790 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
789 791 badfn=None):
790 792 '''Return a matcher that will warn about bad matches.'''
791 793 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
792 794
793 795 def matchall(repo):
794 796 '''Return a matcher that will efficiently match everything.'''
795 797 return matchmod.always(repo.root, repo.getcwd())
796 798
797 799 def matchfiles(repo, files, badfn=None):
798 800 '''Return a matcher that will efficiently match exactly these files.'''
799 801 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
800 802
801 803 def parsefollowlinespattern(repo, rev, pat, msg):
802 804 """Return a file name from `pat` pattern suitable for usage in followlines
803 805 logic.
804 806 """
805 807 if not matchmod.patkind(pat):
806 808 return pathutil.canonpath(repo.root, repo.getcwd(), pat)
807 809 else:
808 810 ctx = repo[rev]
809 811 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
810 812 files = [f for f in ctx if m(f)]
811 813 if len(files) != 1:
812 814 raise error.ParseError(msg)
813 815 return files[0]
814 816
815 817 def origpath(ui, repo, filepath):
816 818 '''customize where .orig files are created
817 819
818 820 Fetch user defined path from config file: [ui] origbackuppath = <path>
819 821 Fall back to default (filepath with .orig suffix) if not specified
820 822 '''
821 823 origbackuppath = ui.config('ui', 'origbackuppath')
822 824 if not origbackuppath:
823 825 return filepath + ".orig"
824 826
825 827 # Convert filepath from an absolute path into a path inside the repo.
826 828 filepathfromroot = util.normpath(os.path.relpath(filepath,
827 829 start=repo.root))
828 830
829 831 origvfs = vfs.vfs(repo.wjoin(origbackuppath))
830 832 origbackupdir = origvfs.dirname(filepathfromroot)
831 833 if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
832 834 ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))
833 835
834 836 # Remove any files that conflict with the backup file's path
835 837 for f in reversed(list(util.finddirs(filepathfromroot))):
836 838 if origvfs.isfileorlink(f):
837 839 ui.note(_('removing conflicting file: %s\n')
838 840 % origvfs.join(f))
839 841 origvfs.unlink(f)
840 842 break
841 843
842 844 origvfs.makedirs(origbackupdir)
843 845
844 846 if origvfs.isdir(filepathfromroot) and not origvfs.islink(filepathfromroot):
845 847 ui.note(_('removing conflicting directory: %s\n')
846 848 % origvfs.join(filepathfromroot))
847 849 origvfs.rmtree(filepathfromroot, forcibly=True)
848 850
849 851 return origvfs.join(filepathfromroot)
850 852
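A hedged sketch of the effect of the config knob (assumes `repo` is an open
repository; the path is invented):

    # With an hgrc section such as:
    #   [ui]
    #   origbackuppath = .hg/origbackups
    # backups land under that directory instead of next to the file:
    backup = scmutil.origpath(repo.ui, repo, repo.wjoin(b'foo/bar.txt'))
    # -> <repo>/.hg/origbackups/foo/bar.txt, or foo/bar.txt.orig if unset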
851 853 class _containsnode(object):
852 854 """proxy __contains__(node) to container.__contains__ which accepts revs"""
853 855
854 856 def __init__(self, repo, revcontainer):
855 857 self._torev = repo.changelog.rev
856 858 self._revcontains = revcontainer.__contains__
857 859
858 860 def __contains__(self, node):
859 861 return self._revcontains(self._torev(node))
860 862
861 863 def cleanupnodes(repo, replacements, operation, moves=None, metadata=None,
862 864 fixphase=False, targetphase=None, backup=True):
863 865 """do common cleanups when old nodes are replaced by new nodes
864 866
865 867 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
866 868 (we might also want to move working directory parent in the future)
867 869
868 870 By default, bookmark moves are calculated automatically from 'replacements',
869 871 but 'moves' can be used to override that. Also, 'moves' may include
870 872 additional bookmark moves that should not have associated obsmarkers.
871 873
872 874 replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
873 875 have replacements. operation is a string, like "rebase".
874 876
875 877 metadata is a dictionary containing metadata to be stored in obsmarker if
876 878 obsolescence is enabled.
877 879 """
878 880 assert fixphase or targetphase is None
879 881 if not replacements and not moves:
880 882 return
881 883
882 884 # translate mapping's other forms
883 885 if not util.safehasattr(replacements, 'items'):
884 886 replacements = {(n,): () for n in replacements}
885 887 else:
886 888 # upgrading non tuple "source" to tuple ones for BC
887 889 repls = {}
888 890 for key, value in replacements.items():
889 891 if not isinstance(key, tuple):
890 892 key = (key,)
891 893 repls[key] = value
892 894 replacements = repls
893 895
894 896 # Calculate bookmark movements
895 897 if moves is None:
896 898 moves = {}
897 899 # Unfiltered repo is needed since nodes in replacements might be hidden.
898 900 unfi = repo.unfiltered()
899 901 for oldnodes, newnodes in replacements.items():
900 902 for oldnode in oldnodes:
901 903 if oldnode in moves:
902 904 continue
903 905 if len(newnodes) > 1:
904 906 # usually a split, take the one with biggest rev number
905 907 newnode = next(unfi.set('max(%ln)', newnodes)).node()
906 908 elif len(newnodes) == 0:
907 909 # move bookmark backwards
908 910 allreplaced = []
909 911 for rep in replacements:
910 912 allreplaced.extend(rep)
911 913 roots = list(unfi.set('max((::%n) - %ln)', oldnode,
912 914 allreplaced))
913 915 if roots:
914 916 newnode = roots[0].node()
915 917 else:
916 918 newnode = nullid
917 919 else:
918 920 newnode = newnodes[0]
919 921 moves[oldnode] = newnode
920 922
921 923 allnewnodes = [n for ns in replacements.values() for n in ns]
922 924 toretract = {}
923 925 toadvance = {}
924 926 if fixphase:
925 927 precursors = {}
926 928 for oldnodes, newnodes in replacements.items():
927 929 for oldnode in oldnodes:
928 930 for newnode in newnodes:
929 931 precursors.setdefault(newnode, []).append(oldnode)
930 932
931 933 allnewnodes.sort(key=lambda n: unfi[n].rev())
932 934 newphases = {}
933 935 def phase(ctx):
934 936 return newphases.get(ctx.node(), ctx.phase())
935 937 for newnode in allnewnodes:
936 938 ctx = unfi[newnode]
937 939 parentphase = max(phase(p) for p in ctx.parents())
938 940 if targetphase is None:
939 941 oldphase = max(unfi[oldnode].phase()
940 942 for oldnode in precursors[newnode])
941 943 newphase = max(oldphase, parentphase)
942 944 else:
943 945 newphase = max(targetphase, parentphase)
944 946 newphases[newnode] = newphase
945 947 if newphase > ctx.phase():
946 948 toretract.setdefault(newphase, []).append(newnode)
947 949 elif newphase < ctx.phase():
948 950 toadvance.setdefault(newphase, []).append(newnode)
949 951
950 952 with repo.transaction('cleanup') as tr:
951 953 # Move bookmarks
952 954 bmarks = repo._bookmarks
953 955 bmarkchanges = []
954 956 for oldnode, newnode in moves.items():
955 957 oldbmarks = repo.nodebookmarks(oldnode)
956 958 if not oldbmarks:
957 959 continue
958 960 from . import bookmarks # avoid import cycle
959 961 repo.ui.debug('moving bookmarks %r from %s to %s\n' %
960 962 (pycompat.rapply(pycompat.maybebytestr, oldbmarks),
961 963 hex(oldnode), hex(newnode)))
962 964 # Delete divergent bookmarks being parents of related newnodes
963 965 deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
964 966 allnewnodes, newnode, oldnode)
965 967 deletenodes = _containsnode(repo, deleterevs)
966 968 for name in oldbmarks:
967 969 bmarkchanges.append((name, newnode))
968 970 for b in bookmarks.divergent2delete(repo, deletenodes, name):
969 971 bmarkchanges.append((b, None))
970 972
971 973 if bmarkchanges:
972 974 bmarks.applychanges(repo, tr, bmarkchanges)
973 975
974 976 for phase, nodes in toretract.items():
975 977 phases.retractboundary(repo, tr, phase, nodes)
976 978 for phase, nodes in toadvance.items():
977 979 phases.advanceboundary(repo, tr, phase, nodes)
978 980
979 981 # Obsolete or strip nodes
980 982 if obsolete.isenabled(repo, obsolete.createmarkersopt):
981 983 # If a node is already obsoleted, and we want to obsolete it
982 984 # without a successor, skip that obsolete request since it's
983 985 # unnecessary. That's the "if s or not isobs(n)" check below.
984 986 # Also sort the nodes in topological order; that might be useful for
985 987 # some obsstore logic.
986 988 # NOTE: the sorting might belong to createmarkers.
987 989 torev = unfi.changelog.rev
988 990 sortfunc = lambda ns: torev(ns[0][0])
989 991 rels = []
990 992 for ns, s in sorted(replacements.items(), key=sortfunc):
991 993 rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
992 994 rels.append(rel)
993 995 if rels:
994 996 obsolete.createmarkers(repo, rels, operation=operation,
995 997 metadata=metadata)
996 998 else:
997 999 from . import repair # avoid import cycle
998 1000 tostrip = list(n for ns in replacements for n in ns)
999 1001 if tostrip:
1000 1002 repair.delayedstrip(repo.ui, repo, tostrip, operation,
1001 1003 backup=backup)
1002 1004
1003 1005 def addremove(repo, matcher, prefix, opts=None):
1004 1006 if opts is None:
1005 1007 opts = {}
1006 1008 m = matcher
1007 1009 dry_run = opts.get('dry_run')
1008 1010 try:
1009 1011 similarity = float(opts.get('similarity') or 0)
1010 1012 except ValueError:
1011 1013 raise error.Abort(_('similarity must be a number'))
1012 1014 if similarity < 0 or similarity > 100:
1013 1015 raise error.Abort(_('similarity must be between 0 and 100'))
1014 1016 similarity /= 100.0
1015 1017
1016 1018 ret = 0
1017 1019 join = lambda f: os.path.join(prefix, f)
1018 1020
1019 1021 wctx = repo[None]
1020 1022 for subpath in sorted(wctx.substate):
1021 1023 submatch = matchmod.subdirmatcher(subpath, m)
1022 1024 if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
1023 1025 sub = wctx.sub(subpath)
1024 1026 try:
1025 1027 if sub.addremove(submatch, prefix, opts):
1026 1028 ret = 1
1027 1029 except error.LookupError:
1028 1030 repo.ui.status(_("skipping missing subrepository: %s\n")
1029 1031 % join(subpath))
1030 1032
1031 1033 rejected = []
1032 1034 def badfn(f, msg):
1033 1035 if f in m.files():
1034 1036 m.bad(f, msg)
1035 1037 rejected.append(f)
1036 1038
1037 1039 badmatch = matchmod.badmatch(m, badfn)
1038 1040 added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
1039 1041 badmatch)
1040 1042
1041 1043 unknownset = set(unknown + forgotten)
1042 1044 toprint = unknownset.copy()
1043 1045 toprint.update(deleted)
1044 1046 for abs in sorted(toprint):
1045 1047 if repo.ui.verbose or not m.exact(abs):
1046 1048 if abs in unknownset:
1047 1049 status = _('adding %s\n') % m.uipath(abs)
1048 1050 label = 'ui.addremove.added'
1049 1051 else:
1050 1052 status = _('removing %s\n') % m.uipath(abs)
1051 1053 label = 'ui.addremove.removed'
1052 1054 repo.ui.status(status, label=label)
1053 1055
1054 1056 renames = _findrenames(repo, m, added + unknown, removed + deleted,
1055 1057 similarity)
1056 1058
1057 1059 if not dry_run:
1058 1060 _markchanges(repo, unknown + forgotten, deleted, renames)
1059 1061
1060 1062 for f in rejected:
1061 1063 if f in m.files():
1062 1064 return 1
1063 1065 return ret
1064 1066
1065 1067 def marktouched(repo, files, similarity=0.0):
1066 1068 '''Assert that files have somehow been operated upon. files are relative to
1067 1069 the repo root.'''
1068 1070 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
1069 1071 rejected = []
1070 1072
1071 1073 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
1072 1074
1073 1075 if repo.ui.verbose:
1074 1076 unknownset = set(unknown + forgotten)
1075 1077 toprint = unknownset.copy()
1076 1078 toprint.update(deleted)
1077 1079 for abs in sorted(toprint):
1078 1080 if abs in unknownset:
1079 1081 status = _('adding %s\n') % abs
1080 1082 else:
1081 1083 status = _('removing %s\n') % abs
1082 1084 repo.ui.status(status)
1083 1085
1084 1086 renames = _findrenames(repo, m, added + unknown, removed + deleted,
1085 1087 similarity)
1086 1088
1087 1089 _markchanges(repo, unknown + forgotten, deleted, renames)
1088 1090
1089 1091 for f in rejected:
1090 1092 if f in m.files():
1091 1093 return 1
1092 1094 return 0
1093 1095
1094 1096 def _interestingfiles(repo, matcher):
1095 1097 '''Walk dirstate with matcher, looking for files that addremove would care
1096 1098 about.
1097 1099
1098 1100 This is different from dirstate.status because it doesn't care about
1099 1101 whether files are modified or clean.'''
1100 1102 added, unknown, deleted, removed, forgotten = [], [], [], [], []
1101 1103 audit_path = pathutil.pathauditor(repo.root, cached=True)
1102 1104
1103 1105 ctx = repo[None]
1104 1106 dirstate = repo.dirstate
1105 1107 matcher = repo.narrowmatch(matcher, includeexact=True)
1106 1108 walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
1107 1109 unknown=True, ignored=False, full=False)
1108 1110 for abs, st in walkresults.iteritems():
1109 1111 dstate = dirstate[abs]
1110 1112 if dstate == '?' and audit_path.check(abs):
1111 1113 unknown.append(abs)
1112 1114 elif dstate != 'r' and not st:
1113 1115 deleted.append(abs)
1114 1116 elif dstate == 'r' and st:
1115 1117 forgotten.append(abs)
1116 1118 # for finding renames
1117 1119 elif dstate == 'r' and not st:
1118 1120 removed.append(abs)
1119 1121 elif dstate == 'a':
1120 1122 added.append(abs)
1121 1123
1122 1124 return added, unknown, deleted, removed, forgotten
1123 1125
1124 1126 def _findrenames(repo, matcher, added, removed, similarity):
1125 1127 '''Find renames from removed files to added ones.'''
1126 1128 renames = {}
1127 1129 if similarity > 0:
1128 1130 for old, new, score in similar.findrenames(repo, added, removed,
1129 1131 similarity):
1130 1132 if (repo.ui.verbose or not matcher.exact(old)
1131 1133 or not matcher.exact(new)):
1132 1134 repo.ui.status(_('recording removal of %s as rename to %s '
1133 1135 '(%d%% similar)\n') %
1134 1136 (matcher.rel(old), matcher.rel(new),
1135 1137 score * 100))
1136 1138 renames[new] = old
1137 1139 return renames
1138 1140
1139 1141 def _markchanges(repo, unknown, deleted, renames):
1140 1142 '''Marks the files in unknown as added, the files in deleted as removed,
1141 1143 and the files in renames as copied.'''
1142 1144 wctx = repo[None]
1143 1145 with repo.wlock():
1144 1146 wctx.forget(deleted)
1145 1147 wctx.add(unknown)
1146 1148 for new, old in renames.iteritems():
1147 1149 wctx.copy(old, new)
1148 1150
1149 1151 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1150 1152 """Update the dirstate to reflect the intent of copying src to dst. For
1151 1153 different reasons it might not end with dst being marked as copied from src.
1152 1154 """
1153 1155 origsrc = repo.dirstate.copied(src) or src
1154 1156 if dst == origsrc: # copying back a copy?
1155 1157 if repo.dirstate[dst] not in 'mn' and not dryrun:
1156 1158 repo.dirstate.normallookup(dst)
1157 1159 else:
1158 1160 if repo.dirstate[origsrc] == 'a' and origsrc == src:
1159 1161 if not ui.quiet:
1160 1162 ui.warn(_("%s has not been committed yet, so no copy "
1161 1163 "data will be stored for %s.\n")
1162 1164 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
1163 1165 if repo.dirstate[dst] in '?r' and not dryrun:
1164 1166 wctx.add([dst])
1165 1167 elif not dryrun:
1166 1168 wctx.copy(origsrc, dst)
1167 1169
1168 1170 def writerequires(opener, requirements):
1169 1171 with opener('requires', 'w', atomictemp=True) as fp:
1170 1172 for r in sorted(requirements):
1171 1173 fp.write("%s\n" % r)
1172 1174
1173 1175 class filecachesubentry(object):
1174 1176 def __init__(self, path, stat):
1175 1177 self.path = path
1176 1178 self.cachestat = None
1177 1179 self._cacheable = None
1178 1180
1179 1181 if stat:
1180 1182 self.cachestat = filecachesubentry.stat(self.path)
1181 1183
1182 1184 if self.cachestat:
1183 1185 self._cacheable = self.cachestat.cacheable()
1184 1186 else:
1185 1187 # None means we don't know yet
1186 1188 self._cacheable = None
1187 1189
1188 1190 def refresh(self):
1189 1191 if self.cacheable():
1190 1192 self.cachestat = filecachesubentry.stat(self.path)
1191 1193
1192 1194 def cacheable(self):
1193 1195 if self._cacheable is not None:
1194 1196 return self._cacheable
1195 1197
1196 1198 # we don't know yet, assume it is for now
1197 1199 return True
1198 1200
1199 1201 def changed(self):
1200 1202 # no point in going further if we can't cache it
1201 1203 if not self.cacheable():
1202 1204 return True
1203 1205
1204 1206 newstat = filecachesubentry.stat(self.path)
1205 1207
1206 1208 # we may not know if it's cacheable yet, check again now
1207 1209 if newstat and self._cacheable is None:
1208 1210 self._cacheable = newstat.cacheable()
1209 1211
1210 1212 # check again
1211 1213 if not self._cacheable:
1212 1214 return True
1213 1215
1214 1216 if self.cachestat != newstat:
1215 1217 self.cachestat = newstat
1216 1218 return True
1217 1219 else:
1218 1220 return False
1219 1221
1220 1222 @staticmethod
1221 1223 def stat(path):
1222 1224 try:
1223 1225 return util.cachestat(path)
1224 1226 except OSError as e:
1225 1227 if e.errno != errno.ENOENT:
1226 1228 raise
1227 1229
1228 1230 class filecacheentry(object):
1229 1231 def __init__(self, paths, stat=True):
1230 1232 self._entries = []
1231 1233 for path in paths:
1232 1234 self._entries.append(filecachesubentry(path, stat))
1233 1235
1234 1236 def changed(self):
1235 1237 '''true if any entry has changed'''
1236 1238 for entry in self._entries:
1237 1239 if entry.changed():
1238 1240 return True
1239 1241 return False
1240 1242
1241 1243 def refresh(self):
1242 1244 for entry in self._entries:
1243 1245 entry.refresh()
1244 1246
1245 1247 class filecache(object):
1246 1248 """A property like decorator that tracks files under .hg/ for updates.
1247 1249
1248 1250 On first access, the files defined as arguments are stat()ed and the
1249 1251 results cached. The decorated function is called. The results are stashed
1250 1252 away in a ``_filecache`` dict on the object whose method is decorated.
1251 1253
1252 1254 On subsequent access, the cached result is used as it is set to the
1253 1255 instance dictionary.
1254 1256
1255 1257 On external property set/delete operations, the caller must update the
1256 1258 corresponding _filecache entry appropriately. Use __class__.<attr>.set()
1257 1259 instead of directly setting <attr>.
1258 1260
1259 1261 When using the property API, the cached data is always used if available.
1260 1262 No stat() is performed to check if the file has changed.
1261 1263
1262 1264 Others can muck about with the state of the ``_filecache`` dict. e.g. they
1263 1265 can populate an entry before the property's getter is called. In this case,
1264 1266 entries in ``_filecache`` will be used during property operations,
1265 1267 if available. If the underlying file changes, it is up to external callers
1266 1268 to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
1267 1269 method result as well as possibly calling ``del obj._filecache[attr]`` to
1268 1270 remove the ``filecacheentry``.
1269 1271 """
1270 1272
1271 1273 def __init__(self, *paths):
1272 1274 self.paths = paths
1273 1275
1274 1276 def join(self, obj, fname):
1275 1277 """Used to compute the runtime path of a cached file.
1276 1278
1277 1279 Users should subclass filecache and provide their own version of this
1278 1280 function to call the appropriate join function on 'obj' (an instance
1279 1281 of the class that its member function was decorated).
1280 1282 """
1281 1283 raise NotImplementedError
1282 1284
1283 1285 def __call__(self, func):
1284 1286 self.func = func
1285 1287 self.sname = func.__name__
1286 1288 self.name = pycompat.sysbytes(self.sname)
1287 1289 return self
1288 1290
1289 1291 def __get__(self, obj, type=None):
1290 1292 # if accessed on the class, return the descriptor itself.
1291 1293 if obj is None:
1292 1294 return self
1293 1295
1294 1296 assert self.sname not in obj.__dict__
1295 1297
1296 1298 entry = obj._filecache.get(self.name)
1297 1299
1298 1300 if entry:
1299 1301 if entry.changed():
1300 1302 entry.obj = self.func(obj)
1301 1303 else:
1302 1304 paths = [self.join(obj, path) for path in self.paths]
1303 1305
1304 1306 # We stat -before- creating the object so our cache doesn't lie if
1305 1307 # a writer modified the file between the time we stat and the time we read
1306 1308 entry = filecacheentry(paths, True)
1307 1309 entry.obj = self.func(obj)
1308 1310
1309 1311 obj._filecache[self.name] = entry
1310 1312
1311 1313 obj.__dict__[self.sname] = entry.obj
1312 1314 return entry.obj
1313 1315
1314 1316 # don't implement __set__(), which would make __dict__ lookup as slow as
1315 1317 # function call.
1316 1318
1317 1319 def set(self, obj, value):
1318 1320 if self.name not in obj._filecache:
1319 1321 # we add an entry for the missing value because X in __dict__
1320 1322 # implies X in _filecache
1321 1323 paths = [self.join(obj, path) for path in self.paths]
1322 1324 ce = filecacheentry(paths, False)
1323 1325 obj._filecache[self.name] = ce
1324 1326 else:
1325 1327 ce = obj._filecache[self.name]
1326 1328
1327 1329 ce.obj = value # update cached copy
1328 1330 obj.__dict__[self.sname] = value # update copy returned by obj.x
1329 1331
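A hedged sketch of the subclassing pattern the docstring calls for (loosely
mirroring what localrepo does; all names here are hypothetical):

    class hgfilecache(scmutil.filecache):
        """Resolve cached paths against the owning object's vfs."""
        def join(self, obj, fname):
            return obj.vfs.join(fname)

    class thing(object):
        def __init__(self, vfs):
            self.vfs = vfs
            self._filecache = {}       # the descriptor stores entries here

        @hgfilecache(b'bookmarks')
        def bookmarks(self):
            # re-computed only when the stat of .hg/bookmarks changes
            return self.vfs.tryread(b'bookmarks')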
1330 1332 def extdatasource(repo, source):
1331 1333 """Gather a map of rev -> value dict from the specified source
1332 1334
1333 1335 A source spec is treated as a URL, with a special case shell: type
1334 1336 for parsing the output from a shell command.
1335 1337
1336 1338 The data is parsed as a series of newline-separated records where
1337 1339 each record is a revision specifier optionally followed by a space
1338 1340 and a freeform string value. If the revision is known locally, it
1339 1341 is converted to a rev, otherwise the record is skipped.
1340 1342
1341 1343 Note that both key and value are treated as UTF-8 and converted to
1342 1344 the local encoding. This allows uniformity between local and
1343 1345 remote data sources.
1344 1346 """
1345 1347
1346 1348 spec = repo.ui.config("extdata", source)
1347 1349 if not spec:
1348 1350 raise error.Abort(_("unknown extdata source '%s'") % source)
1349 1351
1350 1352 data = {}
1351 1353 src = proc = None
1352 1354 try:
1353 1355 if spec.startswith("shell:"):
1354 1356 # external commands should be run relative to the repo root
1355 1357 cmd = spec[6:]
1356 1358 proc = subprocess.Popen(procutil.tonativestr(cmd),
1357 1359 shell=True, bufsize=-1,
1358 1360 close_fds=procutil.closefds,
1359 1361 stdout=subprocess.PIPE,
1360 1362 cwd=procutil.tonativestr(repo.root))
1361 1363 src = proc.stdout
1362 1364 else:
1363 1365 # treat as a URL or file
1364 1366 src = url.open(repo.ui, spec)
1365 1367 for l in src:
1366 1368 if " " in l:
1367 1369 k, v = l.strip().split(" ", 1)
1368 1370 else:
1369 1371 k, v = l.strip(), ""
1370 1372
1371 1373 k = encoding.tolocal(k)
1372 1374 try:
1373 1375 data[revsingle(repo, k).rev()] = encoding.tolocal(v)
1374 1376 except (error.LookupError, error.RepoLookupError):
1375 1377 pass # we ignore data for nodes that don't exist locally
1376 1378 finally:
1377 1379 if proc:
1378 1380 proc.communicate()
1379 1381 if src:
1380 1382 src.close()
1381 1383 if proc and proc.returncode != 0:
1382 1384 raise error.Abort(_("extdata command '%s' failed: %s")
1383 1385 % (cmd, procutil.explainexit(proc.returncode)))
1384 1386
1385 1387 return data
1386 1388
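A hedged sketch tying the spec format to a call (assumes `repo` is an open
repository; the section value is invented):

    # [extdata]
    # blame = shell:cat blame.txt    # lines of "<revspec> <free-form value>"
    data = scmutil.extdatasource(repo, b'blame')
    for rev in sorted(data):
        repo.ui.write(b'%d: %s\n' % (rev, data[rev]))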
1387 1389 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1388 1390 if lock is None:
1389 1391 raise error.LockInheritanceContractViolation(
1390 1392 'lock can only be inherited while held')
1391 1393 if environ is None:
1392 1394 environ = {}
1393 1395 with lock.inherit() as locker:
1394 1396 environ[envvar] = locker
1395 1397 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1396 1398
1397 1399 def wlocksub(repo, cmd, *args, **kwargs):
1398 1400 """run cmd as a subprocess that allows inheriting repo's wlock
1399 1401
1400 1402 This can only be called while the wlock is held. This takes all the
1401 1403 arguments that ui.system does, and returns the exit code of the
1402 1404 subprocess."""
1403 1405 return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
1404 1406 **kwargs)
1405 1407
1406 1408 class progress(object):
1407 1409 def __init__(self, ui, topic, unit="", total=None):
1408 1410 self.ui = ui
1409 1411 self.pos = 0
1410 1412 self.topic = topic
1411 1413 self.unit = unit
1412 1414 self.total = total
1413 1415
1414 1416 def __enter__(self):
1415 1417 return self
1416 1418
1417 1419 def __exit__(self, exc_type, exc_value, exc_tb):
1418 1420 self.complete()
1419 1421
1420 1422 def update(self, pos, item="", total=None):
1421 1423 assert pos is not None
1422 1424 if total:
1423 1425 self.total = total
1424 1426 self.pos = pos
1425 1427 self._print(item)
1426 1428
1427 1429 def increment(self, step=1, item="", total=None):
1428 1430 self.update(self.pos + step, item, total)
1429 1431
1430 1432 def complete(self):
1431 1433 self.ui.progress(self.topic, None)
1432 1434
1433 1435 def _print(self, item):
1434 1436 self.ui.progress(self.topic, self.pos, item, self.unit,
1435 1437 self.total)
1436 1438
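A hedged usage sketch of the context-manager form (assumes `u` is a ui
instance):

    with scmutil.progress(u, b'scanning', unit=b'files', total=3) as p:
        for i, name in enumerate((b'a', b'b', b'c')):
            p.update(i + 1, item=name)   # or p.increment() for stepwise use
    # __exit__ calls complete(), which clears the topic via ui.progress()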
1437 1439 def gdinitconfig(ui):
1438 1440 """helper function to know if a repo should be created as general delta
1439 1441 """
1440 1442 # experimental config: format.generaldelta
1441 1443 return (ui.configbool('format', 'generaldelta')
1442 1444 or ui.configbool('format', 'usegeneraldelta')
1443 1445 or ui.configbool('format', 'sparse-revlog'))
1444 1446
1445 1447 def gddeltaconfig(ui):
1446 1448 """helper function to know if incoming delta should be optimised
1447 1449 """
1448 1450 # experimental config: format.generaldelta
1449 1451 return ui.configbool('format', 'generaldelta')
1450 1452
1451 1453 class simplekeyvaluefile(object):
1452 1454 """A simple file with key=value lines
1453 1455
1454 1456 Keys must be alphanumerics and start with a letter; values must not
1455 1457 contain '\n' characters"""
1456 1458 firstlinekey = '__firstline'
1457 1459
1458 1460 def __init__(self, vfs, path, keys=None):
1459 1461 self.vfs = vfs
1460 1462 self.path = path
1461 1463
1462 1464 def read(self, firstlinenonkeyval=False):
1463 1465 """Read the contents of a simple key-value file
1464 1466
1465 1467 'firstlinenonkeyval' indicates whether the first line of file should
1466 1468 be treated as a key-value pair or returned fully under the
1467 1469 __firstline key."""
1468 1470 lines = self.vfs.readlines(self.path)
1469 1471 d = {}
1470 1472 if firstlinenonkeyval:
1471 1473 if not lines:
1472 1474 e = _("empty simplekeyvalue file")
1473 1475 raise error.CorruptedState(e)
1474 1476 # we don't want to include '\n' in the __firstline
1475 1477 d[self.firstlinekey] = lines[0][:-1]
1476 1478 del lines[0]
1477 1479
1478 1480 try:
1479 1481 # the 'if line.strip()' part prevents us from failing on empty
1480 1482 # lines which only contain '\n' and therefore are not skipped
1481 1483 # by 'if line'
1482 1484 updatedict = dict(line[:-1].split('=', 1) for line in lines
1483 1485 if line.strip())
1484 1486 if self.firstlinekey in updatedict:
1485 1487 e = _("%r can't be used as a key")
1486 1488 raise error.CorruptedState(e % self.firstlinekey)
1487 1489 d.update(updatedict)
1488 1490 except ValueError as e:
1489 1491 raise error.CorruptedState(str(e))
1490 1492 return d
1491 1493
1492 1494 def write(self, data, firstline=None):
1493 1495 """Write key=>value mapping to a file
1494 1496 data is a dict. Keys must be alphanumerical and start with a letter.
1495 1497 Values must not contain newline characters.
1496 1498
1497 1499 If 'firstline' is not None, it is written to file before
1498 1500 everything else, as it is, not in a key=value form"""
1499 1501 lines = []
1500 1502 if firstline is not None:
1501 1503 lines.append('%s\n' % firstline)
1502 1504
1503 1505 for k, v in data.items():
1504 1506 if k == self.firstlinekey:
1505 1507 e = "key name '%s' is reserved" % self.firstlinekey
1506 1508 raise error.ProgrammingError(e)
1507 1509 if not k[0:1].isalpha():
1508 1510 e = "keys must start with a letter in a key-value file"
1509 1511 raise error.ProgrammingError(e)
1510 1512 if not k.isalnum():
1511 1513 e = "invalid key name in a simple key-value file"
1512 1514 raise error.ProgrammingError(e)
1513 1515 if '\n' in v:
1514 1516 e = "invalid value in a simple key-value file"
1515 1517 raise error.ProgrammingError(e)
1516 1518 lines.append("%s=%s\n" % (k, v))
1517 1519 with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
1518 1520 fp.write(''.join(lines))
1519 1521
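A hedged round-trip sketch (assumes `repo` is an open repository, so repo.vfs
points at .hg/; the file name is invented):

    kvfile = scmutil.simplekeyvaluefile(repo.vfs, b'mystate')
    kvfile.write({b'version': b'1', b'phase': b'done'}, firstline=b'v1')
    data = kvfile.read(firstlinenonkeyval=True)
    assert data[b'__firstline'] == b'v1'
    assert data[b'phase'] == b'done'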
1520 1522 _reportobsoletedsource = [
1521 1523 'debugobsolete',
1522 1524 'pull',
1523 1525 'push',
1524 1526 'serve',
1525 1527 'unbundle',
1526 1528 ]
1527 1529
1528 1530 _reportnewcssource = [
1529 1531 'pull',
1530 1532 'unbundle',
1531 1533 ]
1532 1534
def prefetchfiles(repo, revs, match):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally, before the command
    uses them."""
    if match:
        # The command itself will complain about files that don't exist, so
        # don't duplicate the message.
        match = matchmod.badmatch(match, lambda fn, msg: None)
    else:
        match = matchall(repo)

    fileprefetchhooks(repo, revs, match)

# a list of (repo, revs, match) prefetch functions
fileprefetchhooks = util.hooks()
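
# Sketch of how an extension would hook in ('myextension' and 'prefetch' are
# hypothetical names). util.hooks() invokes every registered function with
# the same (repo, revs, match) arguments:
#
#   def prefetch(repo, revs, match):
#       ...  # e.g. batch-fetch file contents from a remote store
#   fileprefetchhooks.add('myextension', prefetch)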

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True

def registersummarycallback(repo, otr, txnname=''):
    """register a callback to issue a summary after the transaction is closed
    """
    def txmatch(sources):
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used, leading to trouble. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())
        def wrapped(tr):
            repo = reporef()
            if filtername:
                repo = repo.filtered(filtername)
            func(repo, tr)
        newcat = '%02i-txnreport' % len(categories)
        otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    if txmatch(_reportobsoletedsource):
        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            if obsoleted:
                repo.ui.status(_('obsoleted %i changesets\n')
                               % len(obsoleted))

    if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
        repo.ui.configbool('experimental', 'evolution.report-instabilities')):
        instabilitytypes = [
            ('orphan', 'orphan'),
            ('phase-divergent', 'phasedivergent'),
            ('content-divergent', 'contentdivergent'),
        ]

        def getinstabilitycounts(repo):
            filtered = repo.changelog.filteredrevs
            counts = {}
            for instability, revset in instabilitytypes:
                counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
                                          filtered)
            return counts

        oldinstabilitycounts = getinstabilitycounts(repo)
        @reportsummary
        def reportnewinstabilities(repo, tr):
            newinstabilitycounts = getinstabilitycounts(repo)
            for instability, revset in instabilitytypes:
                delta = (newinstabilitycounts[instability] -
                         oldinstabilitycounts[instability])
                msg = getinstabilitymessage(delta, instability)
                if msg:
                    repo.ui.warn(msg)

    if txmatch(_reportnewcssource):
        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get('origrepolen', len(repo))
            unfi = repo.unfiltered()
            if origrepolen >= len(unfi):
                return

            # Compute the bounds of new visible revisions' range.
            revs = smartset.spanset(repo, start=origrepolen)
            if revs:
                minrev, maxrev = repo[revs.min()], repo[revs.max()]

                if minrev == maxrev:
                    revrange = minrev
                else:
                    revrange = '%s:%s' % (minrev, maxrev)
                draft = len(repo.revs('%ld and draft()', revs))
                secret = len(repo.revs('%ld and secret()', revs))
                if not (draft or secret):
                    msg = _('new changesets %s\n') % revrange
                elif draft and secret:
                    msg = _('new changesets %s (%d drafts, %d secrets)\n')
                    msg %= (revrange, draft, secret)
                elif draft:
                    msg = _('new changesets %s (%d drafts)\n')
                    msg %= (revrange, draft)
                elif secret:
                    msg = _('new changesets %s (%d secrets)\n')
                    msg %= (revrange, secret)
                else:
                    errormsg = 'entered unreachable condition'
                    raise error.ProgrammingError(errormsg)
                repo.ui.status(msg)

            # search new changesets directly pulled as obsolete
            duplicates = tr.changes.get('revduplicates', ())
            obsadded = unfi.revs('(%d: + %ld) and obsolete()',
                                 origrepolen, duplicates)
            cl = repo.changelog
            extinctadded = [r for r in obsadded if r not in cl]
            if extinctadded:
                # They are not just obsolete, but obsolete and invisible;
                # we call them "extinct" internally but the term has not been
                # exposed to users.
                msg = '(%d other changesets obsolete on arrival)\n'
                repo.ui.status(msg % len(extinctadded))

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            pull/unbundle.
            """
            origrepolen = tr.changes.get('origrepolen', len(repo))
            phasetracking = tr.changes.get('phases', {})
            if not phasetracking:
                return
            published = [
                rev for rev, (old, new) in phasetracking.iteritems()
                if new == phases.public and rev < origrepolen
            ]
            if not published:
                return
            repo.ui.status(_('%d local changesets published\n')
                           % len(published))

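# Illustration of how the callbacks above get attached (a sketch, assuming a
# caller that opens the transaction itself; in practice the repository's
# transaction machinery performs this wiring). The summary is printed when
# the transaction closes:
#
#   with repo.transaction('pull') as tr:
#       registersummarycallback(repo, tr, txnname='pull')
#       ...  # the actual pull; on close, e.g. 'new changesets a1b2:c3d4'
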
def getinstabilitymessage(delta, instability):
    """return the message used to warn about new instabilities

    exists as a separate function so that extensions can wrap it to show
    more information, like how to fix instabilities"""
    if delta > 0:
        return _('%i new %s changesets\n') % (delta, instability)

def nodesummaries(repo, nodes, maxnumnodes=4):
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return ' '.join(short(h) for h in nodes)
    first = ' '.join(short(h) for h in nodes[:maxnumnodes])
    return _("%s and %d others") % (first, len(nodes) - maxnumnodes)

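# For example (hypothetical short hashes), six nodes with the default
# maxnumnodes=4 render as:
#
#   "12ab34cd56ef 7890ab12cd34 56ef7890ab12 cd3456ef7890 and 2 others"
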
def enforcesinglehead(repo, tr, desc):
    """check that no named branch has multiple heads"""
    if desc in ('strip', 'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered('visible')
    # possible improvement: we could restrict the check to the affected
    # branches
    for name, heads in visible.branchmap().iteritems():
        if len(heads) > 1:
            msg = _('rejecting multiple heads on branch "%s"')
            msg %= name
            hint = _('%d heads: %s')
            hint %= (len(heads), nodesummaries(repo, heads))
            raise error.Abort(msg, hint=hint)

def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally
    loaded.
    """
    return sink

def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hashes or revision
    numbers are passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    if not repo.filtername or not repo.ui.configbool('experimental',
                                                     'directaccess'):
        return repo

    if repo.filtername not in ('visible', 'visible-hidden'):
        return repo

    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError:  # will be reported by scmutil.revrange()
            continue

        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)

    if not revs:
        return repo

    if hiddentype == 'warn':
        unfi = repo.unfiltered()
        revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
        repo.ui.warn(_("warning: accessing hidden changesets for write "
                       "operation: %s\n") % revstr)

    # we have to use a new filtername to separate branch/tags caches until we
    # can disable those caches when revisions are dynamically pinned.
    return repo.filtered('visible-hidden', revs)

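# Sketch of the intended use ('ffffffffffff' stands in for the hash of some
# hidden changeset; it is a placeholder, not a real value):
#
#   repo = unhidehashlikerevs(repo, ['ffffffffffff', 'tip'], 'warn')
#   # the returned repo is filtered with 'visible-hidden', so later lookups
#   # of that hash succeed (with a warning) instead of failing as hidden
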
def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and return the set of revision numbers of
    hidden changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
    for s in symbols:
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    if n not in cl:
                        revs.add(n)
                    continue
        except ValueError:
            pass

        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs

def bookmarkrevs(repo, mark):
    """
    Select revisions reachable by a given bookmark
    """
    return repo.revs("ancestors(bookmark(%s)) - "
                     "ancestors(head() and not bookmark(%s)) - "
                     "ancestors(bookmark() and not bookmark(%s))",
                     mark, mark, mark)
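
# Reading the revset above (a sketch, for a hypothetical bookmark 'feature'):
# take all ancestors of 'feature', then subtract what is reachable from heads
# not carrying the bookmark and from other bookmarks, leaving the stretch of
# history that 'feature' alone points at:
#
#   revs = bookmarkrevs(repo, 'feature')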