scmutil: wrap locker information in bytestr before repr()ing it...
Augie Fackler
r40203:c554dc0c default
@@ -1,1802 +1,1803 @@
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import hashlib
13 13 import os
14 14 import re
15 15 import socket
16 16 import subprocess
17 17 import weakref
18 18
19 19 from .i18n import _
20 20 from .node import (
21 21 bin,
22 22 hex,
23 23 nullid,
24 24 nullrev,
25 25 short,
26 26 wdirid,
27 27 wdirrev,
28 28 )
29 29
30 30 from . import (
31 31 encoding,
32 32 error,
33 33 match as matchmod,
34 34 obsolete,
35 35 obsutil,
36 36 pathutil,
37 37 phases,
38 38 policy,
39 39 pycompat,
40 40 revsetlang,
41 41 similar,
42 42 smartset,
43 43 url,
44 44 util,
45 45 vfs,
46 46 )
47 47
48 48 from .utils import (
49 49 procutil,
50 50 stringutil,
51 51 )
52 52
53 53 if pycompat.iswindows:
54 54 from . import scmwindows as scmplatform
55 55 else:
56 56 from . import scmposix as scmplatform
57 57
58 58 parsers = policy.importmod(r'parsers')
59 59
60 60 termsize = scmplatform.termsize
61 61
62 62 class status(tuple):
63 63 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
64 64 and 'ignored' properties are only relevant to the working copy.
65 65 '''
66 66
67 67 __slots__ = ()
68 68
69 69 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
70 70 clean):
71 71 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
72 72 ignored, clean))
73 73
74 74 @property
75 75 def modified(self):
76 76 '''files that have been modified'''
77 77 return self[0]
78 78
79 79 @property
80 80 def added(self):
81 81 '''files that have been added'''
82 82 return self[1]
83 83
84 84 @property
85 85 def removed(self):
86 86 '''files that have been removed'''
87 87 return self[2]
88 88
89 89 @property
90 90 def deleted(self):
91 91 '''files that are in the dirstate, but have been deleted from the
92 92 working copy (aka "missing")
93 93 '''
94 94 return self[3]
95 95
96 96 @property
97 97 def unknown(self):
98 98 '''files not in the dirstate that are not ignored'''
99 99 return self[4]
100 100
101 101 @property
102 102 def ignored(self):
103 103 '''files not in the dirstate that are ignored (by _dirignore())'''
104 104 return self[5]
105 105
106 106 @property
107 107 def clean(self):
108 108 '''files that have not been modified'''
109 109 return self[6]
110 110
111 111 def __repr__(self, *args, **kwargs):
112 112 return ((r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
113 113 r'unknown=%s, ignored=%s, clean=%s>') %
114 114 tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self))
115 115
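# A minimal usage sketch of the class above (file lists are hypothetical):
# status behaves like a plain 7-tuple but exposes each bucket by name.
#
#   st = status([b'changed.txt'], [], [], [], [], [], [b'same.txt'])
#   st.modified  -> [b'changed.txt']
#   st.clean     -> [b'same.txt']
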
116 116 def itersubrepos(ctx1, ctx2):
117 117 """find subrepos in ctx1 or ctx2"""
118 118 # Create a (subpath, ctx) mapping where we prefer subpaths from
119 119 # ctx1. The subpaths from ctx2 are important when the .hgsub file
120 120 # has been modified (in ctx2) but not yet committed (in ctx1).
121 121 subpaths = dict.fromkeys(ctx2.substate, ctx2)
122 122 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
123 123
124 124 missing = set()
125 125
126 126 for subpath in ctx2.substate:
127 127 if subpath not in ctx1.substate:
128 128 del subpaths[subpath]
129 129 missing.add(subpath)
130 130
131 131 for subpath, ctx in sorted(subpaths.iteritems()):
132 132 yield subpath, ctx.sub(subpath)
133 133
134 134 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
135 135 # status and diff will have an accurate result when it does
136 136 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
137 137 # against itself.
138 138 for subpath in missing:
139 139 yield subpath, ctx2.nullsub(subpath, ctx1)
140 140
141 141 def nochangesfound(ui, repo, excluded=None):
142 142 '''Report no changes for push/pull, excluded is None or a list of
143 143 nodes excluded from the push/pull.
144 144 '''
145 145 secretlist = []
146 146 if excluded:
147 147 for n in excluded:
148 148 ctx = repo[n]
149 149 if ctx.phase() >= phases.secret and not ctx.extinct():
150 150 secretlist.append(n)
151 151
152 152 if secretlist:
153 153 ui.status(_("no changes found (ignored %d secret changesets)\n")
154 154 % len(secretlist))
155 155 else:
156 156 ui.status(_("no changes found\n"))
157 157
158 158 def callcatch(ui, func):
159 159 """call func() with global exception handling
160 160
161 161 return func() if no exception happens. otherwise do some error handling
162 162 and return an exit code accordingly. does not handle all exceptions.
163 163 """
164 164 try:
165 165 try:
166 166 return func()
167 167 except: # re-raises
168 168 ui.traceback()
169 169 raise
170 170 # Global exception handling, alphabetically
171 171 # Mercurial-specific first, followed by built-in and library exceptions
172 172 except error.LockHeld as inst:
173 173 if inst.errno == errno.ETIMEDOUT:
174 reason = _('timed out waiting for lock held by %r') % inst.locker
174 reason = _('timed out waiting for lock held by %r') % (
175 pycompat.bytestr(inst.locker))
175 176 else:
176 177 reason = _('lock held by %r') % inst.locker
177 178 ui.error(_("abort: %s: %s\n") % (
178 179 inst.desc or stringutil.forcebytestr(inst.filename), reason))
179 180 if not inst.locker:
180 181 ui.error(_("(lock might be very busy)\n"))
181 182 except error.LockUnavailable as inst:
182 183 ui.error(_("abort: could not lock %s: %s\n") %
183 184 (inst.desc or stringutil.forcebytestr(inst.filename),
184 185 encoding.strtolocal(inst.strerror)))
185 186 except error.OutOfBandError as inst:
186 187 if inst.args:
187 188 msg = _("abort: remote error:\n")
188 189 else:
189 190 msg = _("abort: remote error\n")
190 191 ui.error(msg)
191 192 if inst.args:
192 193 ui.error(''.join(inst.args))
193 194 if inst.hint:
194 195 ui.error('(%s)\n' % inst.hint)
195 196 except error.RepoError as inst:
196 197 ui.error(_("abort: %s!\n") % inst)
197 198 if inst.hint:
198 199 ui.error(_("(%s)\n") % inst.hint)
199 200 except error.ResponseError as inst:
200 201 ui.error(_("abort: %s") % inst.args[0])
201 202 msg = inst.args[1]
202 203 if isinstance(msg, type(u'')):
203 204 msg = pycompat.sysbytes(msg)
204 205 if not isinstance(msg, bytes):
205 206 ui.error(" %r\n" % (msg,))
206 207 elif not msg:
207 208 ui.error(_(" empty string\n"))
208 209 else:
209 210 ui.error("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
210 211 except error.CensoredNodeError as inst:
211 212 ui.error(_("abort: file censored %s!\n") % inst)
212 213 except error.StorageError as inst:
213 214 ui.error(_("abort: %s!\n") % inst)
214 215 except error.InterventionRequired as inst:
215 216 ui.error("%s\n" % inst)
216 217 if inst.hint:
217 218 ui.error(_("(%s)\n") % inst.hint)
218 219 return 1
219 220 except error.WdirUnsupported:
220 221 ui.error(_("abort: working directory revision cannot be specified\n"))
221 222 except error.Abort as inst:
222 223 ui.error(_("abort: %s\n") % inst)
223 224 if inst.hint:
224 225 ui.error(_("(%s)\n") % inst.hint)
225 226 except ImportError as inst:
226 227 ui.error(_("abort: %s!\n") % stringutil.forcebytestr(inst))
227 228 m = stringutil.forcebytestr(inst).split()[-1]
228 229 if m in "mpatch bdiff".split():
229 230 ui.error(_("(did you forget to compile extensions?)\n"))
230 231 elif m in "zlib".split():
231 232 ui.error(_("(is your Python install correct?)\n"))
232 233 except IOError as inst:
233 234 if util.safehasattr(inst, "code"):
234 235 ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst))
235 236 elif util.safehasattr(inst, "reason"):
236 237 try: # usually it is in the form (errno, strerror)
237 238 reason = inst.reason.args[1]
238 239 except (AttributeError, IndexError):
239 240 # it might be anything, for example a string
240 241 reason = inst.reason
241 242 if isinstance(reason, pycompat.unicode):
242 243 # SSLError of Python 2.7.9 contains a unicode
243 244 reason = encoding.unitolocal(reason)
244 245 ui.error(_("abort: error: %s\n") % reason)
245 246 elif (util.safehasattr(inst, "args")
246 247 and inst.args and inst.args[0] == errno.EPIPE):
247 248 pass
248 249 elif getattr(inst, "strerror", None):
249 250 if getattr(inst, "filename", None):
250 251 ui.error(_("abort: %s: %s\n") % (
251 252 encoding.strtolocal(inst.strerror),
252 253 stringutil.forcebytestr(inst.filename)))
253 254 else:
254 255 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
255 256 else:
256 257 raise
257 258 except OSError as inst:
258 259 if getattr(inst, "filename", None) is not None:
259 260 ui.error(_("abort: %s: '%s'\n") % (
260 261 encoding.strtolocal(inst.strerror),
261 262 stringutil.forcebytestr(inst.filename)))
262 263 else:
263 264 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
264 265 except MemoryError:
265 266 ui.error(_("abort: out of memory\n"))
266 267 except SystemExit as inst:
267 268 # Commands shouldn't sys.exit directly, but give a return code.
268 269 # Just in case catch this and pass exit code to caller.
269 270 return inst.code
270 271 except socket.error as inst:
271 272 ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))
272 273
273 274 return -1
274 275
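# Illustrative calling pattern, assuming an existing ``ui`` object:
# dispatch-style callers pass their main function through callcatch() so
# the error table above is applied uniformly.
#
#   def _main():
#       ...                      # may raise error.Abort, IOError, etc.
#       return 0
#   ret = callcatch(ui, _main)   # 0 on success; 1, -1 or inst.code on errors
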
275 276 def checknewlabel(repo, lbl, kind):
276 277 # Do not use the "kind" parameter in ui output.
277 278 # It makes strings difficult to translate.
278 279 if lbl in ['tip', '.', 'null']:
279 280 raise error.Abort(_("the name '%s' is reserved") % lbl)
280 281 for c in (':', '\0', '\n', '\r'):
281 282 if c in lbl:
282 283 raise error.Abort(
283 284 _("%r cannot be used in a name") % pycompat.bytestr(c))
284 285 try:
285 286 int(lbl)
286 287 raise error.Abort(_("cannot use an integer as a name"))
287 288 except ValueError:
288 289 pass
289 290 if lbl.strip() != lbl:
290 291 raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
291 292
292 293 def checkfilename(f):
293 294 '''Check that the filename f is an acceptable filename for a tracked file'''
294 295 if '\r' in f or '\n' in f:
295 296 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
296 297 % pycompat.bytestr(f))
297 298
298 299 def checkportable(ui, f):
299 300 '''Check if filename f is portable and warn or abort depending on config'''
300 301 checkfilename(f)
301 302 abort, warn = checkportabilityalert(ui)
302 303 if abort or warn:
303 304 msg = util.checkwinfilename(f)
304 305 if msg:
305 306 msg = "%s: %s" % (msg, procutil.shellquote(f))
306 307 if abort:
307 308 raise error.Abort(msg)
308 309 ui.warn(_("warning: %s\n") % msg)
309 310
310 311 def checkportabilityalert(ui):
311 312 '''check if the user's config requests nothing, a warning, or abort for
312 313 non-portable filenames'''
313 314 val = ui.config('ui', 'portablefilenames')
314 315 lval = val.lower()
315 316 bval = stringutil.parsebool(val)
316 317 abort = pycompat.iswindows or lval == 'abort'
317 318 warn = bval or lval == 'warn'
318 319 if bval is None and not (warn or abort or lval == 'ignore'):
319 320 raise error.ConfigError(
320 321 _("ui.portablefilenames value is invalid ('%s')") % val)
321 322 return abort, warn
322 323
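# The recognized settings, as handled above (illustrative config):
#
#   [ui]
#   portablefilenames = warn    # the default: warn about non-portable names
#   portablefilenames = abort   # raise error.Abort (implied on Windows)
#   portablefilenames = ignore  # skip the check entirely
#
# Any other non-boolean value raises error.ConfigError.
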
323 324 class casecollisionauditor(object):
324 325 def __init__(self, ui, abort, dirstate):
325 326 self._ui = ui
326 327 self._abort = abort
327 328 allfiles = '\0'.join(dirstate._map)
328 329 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
329 330 self._dirstate = dirstate
330 331 # The purpose of _newfiles is so that we don't complain about
331 332 # case collisions if someone were to call this object with the
332 333 # same filename twice.
333 334 self._newfiles = set()
334 335
335 336 def __call__(self, f):
336 337 if f in self._newfiles:
337 338 return
338 339 fl = encoding.lower(f)
339 340 if fl in self._loweredfiles and f not in self._dirstate:
340 341 msg = _('possible case-folding collision for %s') % f
341 342 if self._abort:
342 343 raise error.Abort(msg)
343 344 self._ui.warn(_("warning: %s\n") % msg)
344 345 self._loweredfiles.add(fl)
345 346 self._newfiles.add(f)
346 347
347 348 def filteredhash(repo, maxrev):
348 349 """build hash of filtered revisions in the current repoview.
349 350
350 351 Multiple caches perform up-to-date validation by checking that the
351 352 tiprev and tipnode stored in the cache file match the current repository.
352 353 However, this is not sufficient for validating repoviews because the set
353 354 of revisions in the view may change without the repository tiprev and
354 355 tipnode changing.
355 356
356 357 This function hashes all the revs filtered from the view and returns
357 358 that SHA-1 digest.
358 359 """
359 360 cl = repo.changelog
360 361 if not cl.filteredrevs:
361 362 return None
362 363 key = None
363 364 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
364 365 if revs:
365 366 s = hashlib.sha1()
366 367 for rev in revs:
367 368 s.update('%d;' % rev)
368 369 key = s.digest()
369 370 return key
370 371
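# Sketch of the intended validation pattern for a repoview-aware cache,
# assuming an existing ``repo`` and a previously stored (tiprev, key) pair:
#
#   key = filteredhash(repo, cachedtiprev)
#   if key != cachedkey:
#       ...   # the set of filtered revs changed; rebuild the cache
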
371 372 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
372 373 '''yield every hg repository under path, always recursively.
373 374 The recurse flag will only control recursion into repo working dirs'''
374 375 def errhandler(err):
375 376 if err.filename == path:
376 377 raise err
377 378 samestat = getattr(os.path, 'samestat', None)
378 379 if followsym and samestat is not None:
379 380 def adddir(dirlst, dirname):
380 381 dirstat = os.stat(dirname)
381 382 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
382 383 if not match:
383 384 dirlst.append(dirstat)
384 385 return not match
385 386 else:
386 387 followsym = False
387 388
388 389 if (seen_dirs is None) and followsym:
389 390 seen_dirs = []
390 391 adddir(seen_dirs, path)
391 392 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
392 393 dirs.sort()
393 394 if '.hg' in dirs:
394 395 yield root # found a repository
395 396 qroot = os.path.join(root, '.hg', 'patches')
396 397 if os.path.isdir(os.path.join(qroot, '.hg')):
397 398 yield qroot # we have a patch queue repo here
398 399 if recurse:
399 400 # avoid recursing inside the .hg directory
400 401 dirs.remove('.hg')
401 402 else:
402 403 dirs[:] = [] # don't descend further
403 404 elif followsym:
404 405 newdirs = []
405 406 for d in dirs:
406 407 fname = os.path.join(root, d)
407 408 if adddir(seen_dirs, fname):
408 409 if os.path.islink(fname):
409 410 for hgname in walkrepos(fname, True, seen_dirs):
410 411 yield hgname
411 412 else:
412 413 newdirs.append(d)
413 414 dirs[:] = newdirs
414 415
415 416 def binnode(ctx):
416 417 """Return binary node id for a given basectx"""
417 418 node = ctx.node()
418 419 if node is None:
419 420 return wdirid
420 421 return node
421 422
422 423 def intrev(ctx):
423 424 """Return integer for a given basectx that can be used in comparisons
424 425 or arithmetic operations"""
425 426 rev = ctx.rev()
426 427 if rev is None:
427 428 return wdirrev
428 429 return rev
429 430
430 431 def formatchangeid(ctx):
431 432 """Format changectx as '{rev}:{node|formatnode}', which is the default
432 433 template provided by logcmdutil.changesettemplater"""
433 434 repo = ctx.repo()
434 435 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
435 436
436 437 def formatrevnode(ui, rev, node):
437 438 """Format given revision and node depending on the current verbosity"""
438 439 if ui.debugflag:
439 440 hexfunc = hex
440 441 else:
441 442 hexfunc = short
442 443 return '%d:%s' % (rev, hexfunc(node))
443 444
444 445 def resolvehexnodeidprefix(repo, prefix):
445 446 if (prefix.startswith('x') and
446 447 repo.ui.configbool('experimental', 'revisions.prefixhexnode')):
447 448 prefix = prefix[1:]
448 449 try:
449 450 # Uses unfiltered repo because it's faster when prefix is ambiguous.
450 451 # This matches the shortesthexnodeidprefix() function below.
451 452 node = repo.unfiltered().changelog._partialmatch(prefix)
452 453 except error.AmbiguousPrefixLookupError:
453 454 revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
454 455 if revset:
455 456 # Clear config to avoid infinite recursion
456 457 configoverrides = {('experimental',
457 458 'revisions.disambiguatewithin'): None}
458 459 with repo.ui.configoverride(configoverrides):
459 460 revs = repo.anyrevs([revset], user=True)
460 461 matches = []
461 462 for rev in revs:
462 463 node = repo.changelog.node(rev)
463 464 if hex(node).startswith(prefix):
464 465 matches.append(node)
465 466 if len(matches) == 1:
466 467 return matches[0]
467 468 raise
468 469 if node is None:
469 470 return
470 471 repo.changelog.rev(node) # make sure node isn't filtered
471 472 return node
472 473
473 474 def mayberevnum(repo, prefix):
474 475 """Checks if the given prefix may be mistaken for a revision number"""
475 476 try:
476 477 i = int(prefix)
477 478 # if we are a pure int, then starting with zero will not be
478 479 # confused as a rev; or, obviously, if the int is larger
479 480 # than the value of the tip rev
480 481 if prefix[0:1] == b'0' or i >= len(repo):
481 482 return False
482 483 return True
483 484 except ValueError:
484 485 return False
485 486
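# Examples, assuming a repo with 100 revisions (len(repo) == 100):
#
#   mayberevnum(repo, b'42')    -> True   (a plausible revision number)
#   mayberevnum(repo, b'042')   -> False  (leading zero is never a revnum)
#   mayberevnum(repo, b'1234')  -> False  (beyond the tip rev)
#   mayberevnum(repo, b'abc')   -> False  (not an integer at all)
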
486 487 def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
487 488 """Find the shortest unambiguous prefix that matches hexnode.
488 489
489 490 If "cache" is not None, it must be a dictionary that can be used for
490 491 caching between calls to this method.
491 492 """
492 493 # _partialmatch() of filtered changelog could take O(len(repo)) time,
493 494 # which would be unacceptably slow, so we look for hash collisions in
494 495 # unfiltered space, which means some hashes may be slightly longer.
495 496
496 497 def disambiguate(prefix):
497 498 """Disambiguate against revnums."""
498 499 if repo.ui.configbool('experimental', 'revisions.prefixhexnode'):
499 500 if mayberevnum(repo, prefix):
500 501 return 'x' + prefix
501 502 else:
502 503 return prefix
503 504
504 505 hexnode = hex(node)
505 506 for length in range(len(prefix), len(hexnode) + 1):
506 507 prefix = hexnode[:length]
507 508 if not mayberevnum(repo, prefix):
508 509 return prefix
509 510
510 511 cl = repo.unfiltered().changelog
511 512 revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
512 513 if revset:
513 514 revs = None
514 515 if cache is not None:
515 516 revs = cache.get('disambiguationrevset')
516 517 if revs is None:
517 518 revs = repo.anyrevs([revset], user=True)
518 519 if cache is not None:
519 520 cache['disambiguationrevset'] = revs
520 521 if cl.rev(node) in revs:
521 522 hexnode = hex(node)
522 523 nodetree = None
523 524 if cache is not None:
524 525 nodetree = cache.get('disambiguationnodetree')
525 526 if not nodetree:
526 527 try:
527 528 nodetree = parsers.nodetree(cl.index, len(revs))
528 529 except AttributeError:
529 530 # no native nodetree
530 531 pass
531 532 else:
532 533 for r in revs:
533 534 nodetree.insert(r)
534 535 if cache is not None:
535 536 cache['disambiguationnodetree'] = nodetree
536 537 if nodetree is not None:
537 538 length = max(nodetree.shortest(node), minlength)
538 539 prefix = hexnode[:length]
539 540 return disambiguate(prefix)
540 541 for length in range(minlength, len(hexnode) + 1):
541 542 matches = []
542 543 prefix = hexnode[:length]
543 544 for rev in revs:
544 545 otherhexnode = repo[rev].hex()
545 546 if prefix == otherhexnode[:length]:
546 547 matches.append(otherhexnode)
547 548 if len(matches) == 1:
548 549 return disambiguate(prefix)
549 550
550 551 try:
551 552 return disambiguate(cl.shortest(node, minlength))
552 553 except error.LookupError:
553 554 raise error.RepoLookupError()
554 555
555 556 def isrevsymbol(repo, symbol):
556 557 """Checks if a symbol exists in the repo.
557 558
558 559 See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
559 560 symbol is an ambiguous nodeid prefix.
560 561 """
561 562 try:
562 563 revsymbol(repo, symbol)
563 564 return True
564 565 except error.RepoLookupError:
565 566 return False
566 567
567 568 def revsymbol(repo, symbol):
568 569 """Returns a context given a single revision symbol (as string).
569 570
570 571 This is similar to revsingle(), but accepts only a single revision symbol,
571 572 i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
572 573 not "max(public())".
573 574 """
574 575 if not isinstance(symbol, bytes):
575 576 msg = ("symbol (%s of type %s) was not a string, did you mean "
576 577 "repo[symbol]?" % (symbol, type(symbol)))
577 578 raise error.ProgrammingError(msg)
578 579 try:
579 580 if symbol in ('.', 'tip', 'null'):
580 581 return repo[symbol]
581 582
582 583 try:
583 584 r = int(symbol)
584 585 if '%d' % r != symbol:
585 586 raise ValueError
586 587 l = len(repo.changelog)
587 588 if r < 0:
588 589 r += l
589 590 if r < 0 or r >= l and r != wdirrev:
590 591 raise ValueError
591 592 return repo[r]
592 593 except error.FilteredIndexError:
593 594 raise
594 595 except (ValueError, OverflowError, IndexError):
595 596 pass
596 597
597 598 if len(symbol) == 40:
598 599 try:
599 600 node = bin(symbol)
600 601 rev = repo.changelog.rev(node)
601 602 return repo[rev]
602 603 except error.FilteredLookupError:
603 604 raise
604 605 except (TypeError, LookupError):
605 606 pass
606 607
607 608 # look up bookmarks through the name interface
608 609 try:
609 610 node = repo.names.singlenode(repo, symbol)
610 611 rev = repo.changelog.rev(node)
611 612 return repo[rev]
612 613 except KeyError:
613 614 pass
614 615
615 616 node = resolvehexnodeidprefix(repo, symbol)
616 617 if node is not None:
617 618 rev = repo.changelog.rev(node)
618 619 return repo[rev]
619 620
620 621 raise error.RepoLookupError(_("unknown revision '%s'") % symbol)
621 622
622 623 except error.WdirUnsupported:
623 624 return repo[None]
624 625 except (error.FilteredIndexError, error.FilteredLookupError,
625 626 error.FilteredRepoLookupError):
626 627 raise _filterederror(repo, symbol)
627 628
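# Usage sketch, assuming an existing ``repo``; symbols must be bytes, and
# full revset expressions are rejected by design:
#
#   ctx = revsymbol(repo, b'tip')            # single symbol: ok
#   ctx = revsymbol(repo, b'deadbeef')       # nodeid prefix: ok
#   ctx = revsymbol(repo, b'max(public())')  # RepoLookupError: not a symbol
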
628 629 def _filterederror(repo, changeid):
629 630 """build an exception to be raised about a filtered changeid
630 631
631 632 This is extracted in a function to help extensions (eg: evolve) to
632 633 experiment with various message variants."""
633 634 if repo.filtername.startswith('visible'):
634 635
635 636 # Check if the changeset is obsolete
636 637 unfilteredrepo = repo.unfiltered()
637 638 ctx = revsymbol(unfilteredrepo, changeid)
638 639
639 640 # If the changeset is obsolete, enrich the message with the reason
640 641 # that made this changeset not visible
641 642 if ctx.obsolete():
642 643 msg = obsutil._getfilteredreason(repo, changeid, ctx)
643 644 else:
644 645 msg = _("hidden revision '%s'") % changeid
645 646
646 647 hint = _('use --hidden to access hidden revisions')
647 648
648 649 return error.FilteredRepoLookupError(msg, hint=hint)
649 650 msg = _("filtered revision '%s' (not in '%s' subset)")
650 651 msg %= (changeid, repo.filtername)
651 652 return error.FilteredRepoLookupError(msg)
652 653
653 654 def revsingle(repo, revspec, default='.', localalias=None):
654 655 if not revspec and revspec != 0:
655 656 return repo[default]
656 657
657 658 l = revrange(repo, [revspec], localalias=localalias)
658 659 if not l:
659 660 raise error.Abort(_('empty revision set'))
660 661 return repo[l.last()]
661 662
662 663 def _pairspec(revspec):
663 664 tree = revsetlang.parse(revspec)
664 665 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
665 666
666 667 def revpair(repo, revs):
667 668 if not revs:
668 669 return repo['.'], repo[None]
669 670
670 671 l = revrange(repo, revs)
671 672
672 673 if not l:
673 674 first = second = None
674 675 elif l.isascending():
675 676 first = l.min()
676 677 second = l.max()
677 678 elif l.isdescending():
678 679 first = l.max()
679 680 second = l.min()
680 681 else:
681 682 first = l.first()
682 683 second = l.last()
683 684
684 685 if first is None:
685 686 raise error.Abort(_('empty revision range'))
686 687 if (first == second and len(revs) >= 2
687 688 and not all(revrange(repo, [r]) for r in revs)):
688 689 raise error.Abort(_('empty revision on one side of range'))
689 690
690 691 # if top-level is range expression, the result must always be a pair
691 692 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
692 693 return repo[first], repo[None]
693 694
694 695 return repo[first], repo[second]
695 696
696 697 def revrange(repo, specs, localalias=None):
697 698 """Execute 1 to many revsets and return the union.
698 699
699 700 This is the preferred mechanism for executing revsets using user-specified
700 701 config options, such as revset aliases.
701 702
702 703 The revsets specified by ``specs`` will be executed via a chained ``OR``
703 704 expression. If ``specs`` is empty, an empty result is returned.
704 705
705 706 ``specs`` can contain integers, in which case they are assumed to be
706 707 revision numbers.
707 708
708 709 It is assumed the revsets are already formatted. If you have arguments
709 710 that need to be expanded in the revset, call ``revsetlang.formatspec()``
710 711 and pass the result as an element of ``specs``.
711 712
712 713 Specifying a single revset is allowed.
713 714
714 715 Returns a ``revset.abstractsmartset`` which is a list-like interface over
715 716 integer revisions.
716 717 """
717 718 allspecs = []
718 719 for spec in specs:
719 720 if isinstance(spec, int):
720 721 spec = revsetlang.formatspec('rev(%d)', spec)
721 722 allspecs.append(spec)
722 723 return repo.anyrevs(allspecs, user=True, localalias=localalias)
723 724
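# Sketch of the preferred calling pattern, assuming an existing ``repo``:
# expand arguments with revsetlang.formatspec() first, then take the union.
#
#   spec = revsetlang.formatspec(b'branch(%s)', branchname)
#   revs = revrange(repo, [spec, b'tip'])   # smartset: branch(...) or tip
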
724 725 def meaningfulparents(repo, ctx):
725 726 """Return list of meaningful (or all if debug) parentrevs for rev.
726 727
727 728 For merges (two non-nullrev revisions) both parents are meaningful.
728 729 Otherwise the first parent revision is considered meaningful if it
729 730 is not the preceding revision.
730 731 """
731 732 parents = ctx.parents()
732 733 if len(parents) > 1:
733 734 return parents
734 735 if repo.ui.debugflag:
735 736 return [parents[0], repo[nullrev]]
736 737 if parents[0].rev() >= intrev(ctx) - 1:
737 738 return []
738 739 return parents
739 740
740 741 def expandpats(pats):
741 742 '''Expand bare globs when running on windows.
742 743 On posix we assume it has already been done by sh.'''
743 744 if not util.expandglobs:
744 745 return list(pats)
745 746 ret = []
746 747 for kindpat in pats:
747 748 kind, pat = matchmod._patsplit(kindpat, None)
748 749 if kind is None:
749 750 try:
750 751 globbed = glob.glob(pat)
751 752 except re.error:
752 753 globbed = [pat]
753 754 if globbed:
754 755 ret.extend(globbed)
755 756 continue
756 757 ret.append(kindpat)
757 758 return ret
758 759
759 760 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
760 761 badfn=None):
761 762 '''Return a matcher and the patterns that were used.
762 763 The matcher will warn about bad matches, unless an alternate badfn callback
763 764 is provided.'''
764 765 if pats == ("",):
765 766 pats = []
766 767 if opts is None:
767 768 opts = {}
768 769 if not globbed and default == 'relpath':
769 770 pats = expandpats(pats or [])
770 771
771 772 def bad(f, msg):
772 773 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
773 774
774 775 if badfn is None:
775 776 badfn = bad
776 777
777 778 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
778 779 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
779 780
780 781 if m.always():
781 782 pats = []
782 783 return m, pats
783 784
784 785 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
785 786 badfn=None):
786 787 '''Return a matcher that will warn about bad matches.'''
787 788 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
788 789
789 790 def matchall(repo):
790 791 '''Return a matcher that will efficiently match everything.'''
791 792 return matchmod.always(repo.root, repo.getcwd())
792 793
793 794 def matchfiles(repo, files, badfn=None):
794 795 '''Return a matcher that will efficiently match exactly these files.'''
795 796 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
796 797
797 798 def parsefollowlinespattern(repo, rev, pat, msg):
798 799 """Return a file name from `pat` pattern suitable for usage in followlines
799 800 logic.
800 801 """
801 802 if not matchmod.patkind(pat):
802 803 return pathutil.canonpath(repo.root, repo.getcwd(), pat)
803 804 else:
804 805 ctx = repo[rev]
805 806 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
806 807 files = [f for f in ctx if m(f)]
807 808 if len(files) != 1:
808 809 raise error.ParseError(msg)
809 810 return files[0]
810 811
811 812 def origpath(ui, repo, filepath):
812 813 '''customize where .orig files are created
813 814
814 815 Fetch user defined path from config file: [ui] origbackuppath = <path>
815 816 Fall back to default (filepath with .orig suffix) if not specified
816 817 '''
817 818 origbackuppath = ui.config('ui', 'origbackuppath')
818 819 if not origbackuppath:
819 820 return filepath + ".orig"
820 821
821 822 # Convert filepath from an absolute path into a path inside the repo.
822 823 filepathfromroot = util.normpath(os.path.relpath(filepath,
823 824 start=repo.root))
824 825
825 826 origvfs = vfs.vfs(repo.wjoin(origbackuppath))
826 827 origbackupdir = origvfs.dirname(filepathfromroot)
827 828 if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
828 829 ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))
829 830
830 831 # Remove any files that conflict with the backup file's path
831 832 for f in reversed(list(util.finddirs(filepathfromroot))):
832 833 if origvfs.isfileorlink(f):
833 834 ui.note(_('removing conflicting file: %s\n')
834 835 % origvfs.join(f))
835 836 origvfs.unlink(f)
836 837 break
837 838
838 839 origvfs.makedirs(origbackupdir)
839 840
840 841 if origvfs.isdir(filepathfromroot) and not origvfs.islink(filepathfromroot):
841 842 ui.note(_('removing conflicting directory: %s\n')
842 843 % origvfs.join(filepathfromroot))
843 844 origvfs.rmtree(filepathfromroot, forcibly=True)
844 845
845 846 return origvfs.join(filepathfromroot)
846 847
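# Illustrative configuration: with origbackuppath set, backups land under
# that directory (relative to the working directory) instead of leaving a
# foo.orig file next to foo:
#
#   [ui]
#   origbackuppath = .hg-origbackups
#
#   origpath(ui, repo, b'foo/bar.txt')
#       -> roughly <repo>/.hg-origbackups/foo/bar.txt
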
847 848 class _containsnode(object):
848 849 """proxy __contains__(node) to container.__contains__ which accepts revs"""
849 850
850 851 def __init__(self, repo, revcontainer):
851 852 self._torev = repo.changelog.rev
852 853 self._revcontains = revcontainer.__contains__
853 854
854 855 def __contains__(self, node):
855 856 return self._revcontains(self._torev(node))
856 857
857 858 def cleanupnodes(repo, replacements, operation, moves=None, metadata=None,
858 859 fixphase=False, targetphase=None, backup=True):
859 860 """do common cleanups when old nodes are replaced by new nodes
860 861
861 862 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
862 863 (we might also want to move working directory parent in the future)
863 864
864 865 By default, bookmark moves are calculated automatically from 'replacements',
865 866 but 'moves' can be used to override that. Also, 'moves' may include
866 867 additional bookmark moves that should not have associated obsmarkers.
867 868
868 869 replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
869 870 have replacements. operation is a string, like "rebase".
870 871
871 872 metadata is a dictionary containing metadata to be stored in obsmarker if
872 873 obsolescence is enabled.
873 874 """
874 875 assert fixphase or targetphase is None
875 876 if not replacements and not moves:
876 877 return
877 878
878 879 # translate mapping's other forms
879 880 if not util.safehasattr(replacements, 'items'):
880 881 replacements = {(n,): () for n in replacements}
881 882 else:
882 883 # upgrading non-tuple "source" to tuple ones for BC
883 884 repls = {}
884 885 for key, value in replacements.items():
885 886 if not isinstance(key, tuple):
886 887 key = (key,)
887 888 repls[key] = value
888 889 replacements = repls
889 890
890 891 # Calculate bookmark movements
891 892 if moves is None:
892 893 moves = {}
893 894 # Unfiltered repo is needed since nodes in replacements might be hidden.
894 895 unfi = repo.unfiltered()
895 896 for oldnodes, newnodes in replacements.items():
896 897 for oldnode in oldnodes:
897 898 if oldnode in moves:
898 899 continue
899 900 if len(newnodes) > 1:
900 901 # usually a split, take the one with biggest rev number
901 902 newnode = next(unfi.set('max(%ln)', newnodes)).node()
902 903 elif len(newnodes) == 0:
903 904 # move bookmark backwards
904 905 allreplaced = []
905 906 for rep in replacements:
906 907 allreplaced.extend(rep)
907 908 roots = list(unfi.set('max((::%n) - %ln)', oldnode,
908 909 allreplaced))
909 910 if roots:
910 911 newnode = roots[0].node()
911 912 else:
912 913 newnode = nullid
913 914 else:
914 915 newnode = newnodes[0]
915 916 moves[oldnode] = newnode
916 917
917 918 allnewnodes = [n for ns in replacements.values() for n in ns]
918 919 toretract = {}
919 920 toadvance = {}
920 921 if fixphase:
921 922 precursors = {}
922 923 for oldnodes, newnodes in replacements.items():
923 924 for oldnode in oldnodes:
924 925 for newnode in newnodes:
925 926 precursors.setdefault(newnode, []).append(oldnode)
926 927
927 928 allnewnodes.sort(key=lambda n: unfi[n].rev())
928 929 newphases = {}
929 930 def phase(ctx):
930 931 return newphases.get(ctx.node(), ctx.phase())
931 932 for newnode in allnewnodes:
932 933 ctx = unfi[newnode]
933 934 parentphase = max(phase(p) for p in ctx.parents())
934 935 if targetphase is None:
935 936 oldphase = max(unfi[oldnode].phase()
936 937 for oldnode in precursors[newnode])
937 938 newphase = max(oldphase, parentphase)
938 939 else:
939 940 newphase = max(targetphase, parentphase)
940 941 newphases[newnode] = newphase
941 942 if newphase > ctx.phase():
942 943 toretract.setdefault(newphase, []).append(newnode)
943 944 elif newphase < ctx.phase():
944 945 toadvance.setdefault(newphase, []).append(newnode)
945 946
946 947 with repo.transaction('cleanup') as tr:
947 948 # Move bookmarks
948 949 bmarks = repo._bookmarks
949 950 bmarkchanges = []
950 951 for oldnode, newnode in moves.items():
951 952 oldbmarks = repo.nodebookmarks(oldnode)
952 953 if not oldbmarks:
953 954 continue
954 955 from . import bookmarks # avoid import cycle
955 956 repo.ui.debug('moving bookmarks %r from %s to %s\n' %
956 957 (pycompat.rapply(pycompat.maybebytestr, oldbmarks),
957 958 hex(oldnode), hex(newnode)))
958 959 # Delete divergent bookmarks being parents of related newnodes
959 960 deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
960 961 allnewnodes, newnode, oldnode)
961 962 deletenodes = _containsnode(repo, deleterevs)
962 963 for name in oldbmarks:
963 964 bmarkchanges.append((name, newnode))
964 965 for b in bookmarks.divergent2delete(repo, deletenodes, name):
965 966 bmarkchanges.append((b, None))
966 967
967 968 if bmarkchanges:
968 969 bmarks.applychanges(repo, tr, bmarkchanges)
969 970
970 971 for phase, nodes in toretract.items():
971 972 phases.retractboundary(repo, tr, phase, nodes)
972 973 for phase, nodes in toadvance.items():
973 974 phases.advanceboundary(repo, tr, phase, nodes)
974 975
975 976 # Obsolete or strip nodes
976 977 if obsolete.isenabled(repo, obsolete.createmarkersopt):
977 978 # If a node is already obsoleted, and we want to obsolete it
978 979 # without a successor, skip that obsolete request since it's
979 980 # unnecessary. That's the "if s or not isobs(n)" check below.
980 981 # Also sort the nodes in topological order, which might be useful
981 982 # for some obsstore logic.
982 983 # NOTE: the sorting might belong to createmarkers.
983 984 torev = unfi.changelog.rev
984 985 sortfunc = lambda ns: torev(ns[0][0])
985 986 rels = []
986 987 for ns, s in sorted(replacements.items(), key=sortfunc):
987 988 rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
988 989 rels.append(rel)
989 990 if rels:
990 991 obsolete.createmarkers(repo, rels, operation=operation,
991 992 metadata=metadata)
992 993 else:
993 994 from . import repair # avoid import cycle
994 995 tostrip = list(n for ns in replacements for n in ns)
995 996 if tostrip:
996 997 repair.delayedstrip(repo.ui, repo, tostrip, operation,
997 998 backup=backup)
998 999
999 1000 def addremove(repo, matcher, prefix, opts=None):
1000 1001 if opts is None:
1001 1002 opts = {}
1002 1003 m = matcher
1003 1004 dry_run = opts.get('dry_run')
1004 1005 try:
1005 1006 similarity = float(opts.get('similarity') or 0)
1006 1007 except ValueError:
1007 1008 raise error.Abort(_('similarity must be a number'))
1008 1009 if similarity < 0 or similarity > 100:
1009 1010 raise error.Abort(_('similarity must be between 0 and 100'))
1010 1011 similarity /= 100.0
1011 1012
1012 1013 ret = 0
1013 1014 join = lambda f: os.path.join(prefix, f)
1014 1015
1015 1016 wctx = repo[None]
1016 1017 for subpath in sorted(wctx.substate):
1017 1018 submatch = matchmod.subdirmatcher(subpath, m)
1018 1019 if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
1019 1020 sub = wctx.sub(subpath)
1020 1021 try:
1021 1022 if sub.addremove(submatch, prefix, opts):
1022 1023 ret = 1
1023 1024 except error.LookupError:
1024 1025 repo.ui.status(_("skipping missing subrepository: %s\n")
1025 1026 % join(subpath))
1026 1027
1027 1028 rejected = []
1028 1029 def badfn(f, msg):
1029 1030 if f in m.files():
1030 1031 m.bad(f, msg)
1031 1032 rejected.append(f)
1032 1033
1033 1034 badmatch = matchmod.badmatch(m, badfn)
1034 1035 added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
1035 1036 badmatch)
1036 1037
1037 1038 unknownset = set(unknown + forgotten)
1038 1039 toprint = unknownset.copy()
1039 1040 toprint.update(deleted)
1040 1041 for abs in sorted(toprint):
1041 1042 if repo.ui.verbose or not m.exact(abs):
1042 1043 if abs in unknownset:
1043 1044 status = _('adding %s\n') % m.uipath(abs)
1044 1045 label = 'addremove.added'
1045 1046 else:
1046 1047 status = _('removing %s\n') % m.uipath(abs)
1047 1048 label = 'addremove.removed'
1048 1049 repo.ui.status(status, label=label)
1049 1050
1050 1051 renames = _findrenames(repo, m, added + unknown, removed + deleted,
1051 1052 similarity)
1052 1053
1053 1054 if not dry_run:
1054 1055 _markchanges(repo, unknown + forgotten, deleted, renames)
1055 1056
1056 1057 for f in rejected:
1057 1058 if f in m.files():
1058 1059 return 1
1059 1060 return ret
1060 1061
1061 1062 def marktouched(repo, files, similarity=0.0):
1062 1063 '''Assert that files have somehow been operated upon. Files are relative to
1063 1064 the repo root.'''
1064 1065 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
1065 1066 rejected = []
1066 1067
1067 1068 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
1068 1069
1069 1070 if repo.ui.verbose:
1070 1071 unknownset = set(unknown + forgotten)
1071 1072 toprint = unknownset.copy()
1072 1073 toprint.update(deleted)
1073 1074 for abs in sorted(toprint):
1074 1075 if abs in unknownset:
1075 1076 status = _('adding %s\n') % abs
1076 1077 else:
1077 1078 status = _('removing %s\n') % abs
1078 1079 repo.ui.status(status)
1079 1080
1080 1081 renames = _findrenames(repo, m, added + unknown, removed + deleted,
1081 1082 similarity)
1082 1083
1083 1084 _markchanges(repo, unknown + forgotten, deleted, renames)
1084 1085
1085 1086 for f in rejected:
1086 1087 if f in m.files():
1087 1088 return 1
1088 1089 return 0
1089 1090
1090 1091 def _interestingfiles(repo, matcher):
1091 1092 '''Walk dirstate with matcher, looking for files that addremove would care
1092 1093 about.
1093 1094
1094 1095 This is different from dirstate.status because it doesn't care about
1095 1096 whether files are modified or clean.'''
1096 1097 added, unknown, deleted, removed, forgotten = [], [], [], [], []
1097 1098 audit_path = pathutil.pathauditor(repo.root, cached=True)
1098 1099
1099 1100 ctx = repo[None]
1100 1101 dirstate = repo.dirstate
1101 1102 matcher = repo.narrowmatch(matcher, includeexact=True)
1102 1103 walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
1103 1104 unknown=True, ignored=False, full=False)
1104 1105 for abs, st in walkresults.iteritems():
1105 1106 dstate = dirstate[abs]
1106 1107 if dstate == '?' and audit_path.check(abs):
1107 1108 unknown.append(abs)
1108 1109 elif dstate != 'r' and not st:
1109 1110 deleted.append(abs)
1110 1111 elif dstate == 'r' and st:
1111 1112 forgotten.append(abs)
1112 1113 # for finding renames
1113 1114 elif dstate == 'r' and not st:
1114 1115 removed.append(abs)
1115 1116 elif dstate == 'a':
1116 1117 added.append(abs)
1117 1118
1118 1119 return added, unknown, deleted, removed, forgotten
1119 1120
1120 1121 def _findrenames(repo, matcher, added, removed, similarity):
1121 1122 '''Find renames from removed files to added ones.'''
1122 1123 renames = {}
1123 1124 if similarity > 0:
1124 1125 for old, new, score in similar.findrenames(repo, added, removed,
1125 1126 similarity):
1126 1127 if (repo.ui.verbose or not matcher.exact(old)
1127 1128 or not matcher.exact(new)):
1128 1129 repo.ui.status(_('recording removal of %s as rename to %s '
1129 1130 '(%d%% similar)\n') %
1130 1131 (matcher.rel(old), matcher.rel(new),
1131 1132 score * 100))
1132 1133 renames[new] = old
1133 1134 return renames
1134 1135
1135 1136 def _markchanges(repo, unknown, deleted, renames):
1136 1137 '''Marks the files in unknown as added, the files in deleted as removed,
1137 1138 and the files in renames as copied.'''
1138 1139 wctx = repo[None]
1139 1140 with repo.wlock():
1140 1141 wctx.forget(deleted)
1141 1142 wctx.add(unknown)
1142 1143 for new, old in renames.iteritems():
1143 1144 wctx.copy(old, new)
1144 1145
1145 1146 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1146 1147 """Update the dirstate to reflect the intent of copying src to dst. For
1147 1148 various reasons, dst may not end up marked as copied from src.
1148 1149 """
1149 1150 origsrc = repo.dirstate.copied(src) or src
1150 1151 if dst == origsrc: # copying back a copy?
1151 1152 if repo.dirstate[dst] not in 'mn' and not dryrun:
1152 1153 repo.dirstate.normallookup(dst)
1153 1154 else:
1154 1155 if repo.dirstate[origsrc] == 'a' and origsrc == src:
1155 1156 if not ui.quiet:
1156 1157 ui.warn(_("%s has not been committed yet, so no copy "
1157 1158 "data will be stored for %s.\n")
1158 1159 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
1159 1160 if repo.dirstate[dst] in '?r' and not dryrun:
1160 1161 wctx.add([dst])
1161 1162 elif not dryrun:
1162 1163 wctx.copy(origsrc, dst)
1163 1164
1164 1165 def writerequires(opener, requirements):
1165 1166 with opener('requires', 'w') as fp:
1166 1167 for r in sorted(requirements):
1167 1168 fp.write("%s\n" % r)
1168 1169
1169 1170 class filecachesubentry(object):
1170 1171 def __init__(self, path, stat):
1171 1172 self.path = path
1172 1173 self.cachestat = None
1173 1174 self._cacheable = None
1174 1175
1175 1176 if stat:
1176 1177 self.cachestat = filecachesubentry.stat(self.path)
1177 1178
1178 1179 if self.cachestat:
1179 1180 self._cacheable = self.cachestat.cacheable()
1180 1181 else:
1181 1182 # None means we don't know yet
1182 1183 self._cacheable = None
1183 1184
1184 1185 def refresh(self):
1185 1186 if self.cacheable():
1186 1187 self.cachestat = filecachesubentry.stat(self.path)
1187 1188
1188 1189 def cacheable(self):
1189 1190 if self._cacheable is not None:
1190 1191 return self._cacheable
1191 1192
1192 1193 # we don't know yet, assume it is for now
1193 1194 return True
1194 1195
1195 1196 def changed(self):
1196 1197 # no point in going further if we can't cache it
1197 1198 if not self.cacheable():
1198 1199 return True
1199 1200
1200 1201 newstat = filecachesubentry.stat(self.path)
1201 1202
1202 1203 # we may not know if it's cacheable yet, check again now
1203 1204 if newstat and self._cacheable is None:
1204 1205 self._cacheable = newstat.cacheable()
1205 1206
1206 1207 # check again
1207 1208 if not self._cacheable:
1208 1209 return True
1209 1210
1210 1211 if self.cachestat != newstat:
1211 1212 self.cachestat = newstat
1212 1213 return True
1213 1214 else:
1214 1215 return False
1215 1216
1216 1217 @staticmethod
1217 1218 def stat(path):
1218 1219 try:
1219 1220 return util.cachestat(path)
1220 1221 except OSError as e:
1221 1222 if e.errno != errno.ENOENT:
1222 1223 raise
1223 1224
1224 1225 class filecacheentry(object):
1225 1226 def __init__(self, paths, stat=True):
1226 1227 self._entries = []
1227 1228 for path in paths:
1228 1229 self._entries.append(filecachesubentry(path, stat))
1229 1230
1230 1231 def changed(self):
1231 1232 '''true if any entry has changed'''
1232 1233 for entry in self._entries:
1233 1234 if entry.changed():
1234 1235 return True
1235 1236 return False
1236 1237
1237 1238 def refresh(self):
1238 1239 for entry in self._entries:
1239 1240 entry.refresh()
1240 1241
1241 1242 class filecache(object):
1242 1243 """A property like decorator that tracks files under .hg/ for updates.
1243 1244
1244 1245 On first access, the files defined as arguments are stat()ed and the
1245 1246 results cached. The decorated function is called. The results are stashed
1246 1247 away in a ``_filecache`` dict on the object whose method is decorated.
1247 1248
1248 1249 On subsequent access, the cached result is returned.
1249 1250
1250 1251 On external property set operations, stat() calls are performed and the new
1251 1252 value is cached.
1252 1253
1253 1254 On property delete operations, cached data is removed.
1254 1255
1255 1256 When using the property API, cached data is always returned, if available:
1256 1257 no stat() is performed to check if the file has changed and if the function
1257 1258 needs to be called to reflect file changes.
1258 1259
1259 1260 Others can muck about with the state of the ``_filecache`` dict. e.g. they
1260 1261 can populate an entry before the property's getter is called. In this case,
1261 1262 entries in ``_filecache`` will be used during property operations,
1262 1263 if available. If the underlying file changes, it is up to external callers
1263 1264 to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
1264 1265 method result as well as possibly calling ``del obj._filecache[attr]`` to
1265 1266 remove the ``filecacheentry``.
1266 1267 """
1267 1268
1268 1269 def __init__(self, *paths):
1269 1270 self.paths = paths
1270 1271
1271 1272 def join(self, obj, fname):
1272 1273 """Used to compute the runtime path of a cached file.
1273 1274
1274 1275 Users should subclass filecache and provide their own version of this
1275 1276 function to call the appropriate join function on 'obj' (an instance
1276 1277 of the class whose member function was decorated).
1277 1278 """
1278 1279 raise NotImplementedError
1279 1280
1280 1281 def __call__(self, func):
1281 1282 self.func = func
1282 1283 self.sname = func.__name__
1283 1284 self.name = pycompat.sysbytes(self.sname)
1284 1285 return self
1285 1286
1286 1287 def __get__(self, obj, type=None):
1287 1288 # if accessed on the class, return the descriptor itself.
1288 1289 if obj is None:
1289 1290 return self
1290 1291 # do we need to check if the file changed?
1291 1292 if self.sname in obj.__dict__:
1292 1293 assert self.name in obj._filecache, self.name
1293 1294 return obj.__dict__[self.sname]
1294 1295
1295 1296 entry = obj._filecache.get(self.name)
1296 1297
1297 1298 if entry:
1298 1299 if entry.changed():
1299 1300 entry.obj = self.func(obj)
1300 1301 else:
1301 1302 paths = [self.join(obj, path) for path in self.paths]
1302 1303
1303 1304 # We stat -before- creating the object so our cache doesn't lie if
1304 1305 # a writer modified the file between the time we read and stat it
1305 1306 entry = filecacheentry(paths, True)
1306 1307 entry.obj = self.func(obj)
1307 1308
1308 1309 obj._filecache[self.name] = entry
1309 1310
1310 1311 obj.__dict__[self.sname] = entry.obj
1311 1312 return entry.obj
1312 1313
1313 1314 def __set__(self, obj, value):
1314 1315 if self.name not in obj._filecache:
1315 1316 # we add an entry for the missing value because X in __dict__
1316 1317 # implies X in _filecache
1317 1318 paths = [self.join(obj, path) for path in self.paths]
1318 1319 ce = filecacheentry(paths, False)
1319 1320 obj._filecache[self.name] = ce
1320 1321 else:
1321 1322 ce = obj._filecache[self.name]
1322 1323
1323 1324 ce.obj = value # update cached copy
1324 1325 obj.__dict__[self.sname] = value # update copy returned by obj.x
1325 1326
1326 1327 def __delete__(self, obj):
1327 1328 try:
1328 1329 del obj.__dict__[self.sname]
1329 1330 except KeyError:
1330 1331 raise AttributeError(self.sname)
1331 1332
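# A minimal sketch of the decorator contract: subclasses provide join() so
# tracked paths resolve against the right directory. The names below are
# hypothetical, mirroring how repo objects use this class.
#
#   class _vfsfilecache(filecache):
#       def join(self, obj, fname):
#           return obj.vfs.join(fname)
#
#   class somecachedobject(object):
#       def __init__(self):
#           self._filecache = {}   # required backing dict
#       @_vfsfilecache('bookmarks')
#       def bookmarks(self):
#           return parsebookmarks(self.vfs)  # re-run only when file changes
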
1332 1333 def extdatasource(repo, source):
1333 1334 """Gather a map of rev -> value dict from the specified source
1334 1335
1335 1336 A source spec is treated as a URL, with a special case shell: type
1336 1337 for parsing the output from a shell command.
1337 1338
1338 1339 The data is parsed as a series of newline-separated records where
1339 1340 each record is a revision specifier optionally followed by a space
1340 1341 and a freeform string value. If the revision is known locally, it
1341 1342 is converted to a rev, otherwise the record is skipped.
1342 1343
1343 1344 Note that both key and value are treated as UTF-8 and converted to
1344 1345 the local encoding. This allows uniformity between local and
1345 1346 remote data sources.
1346 1347 """
1347 1348
1348 1349 spec = repo.ui.config("extdata", source)
1349 1350 if not spec:
1350 1351 raise error.Abort(_("unknown extdata source '%s'") % source)
1351 1352
1352 1353 data = {}
1353 1354 src = proc = None
1354 1355 try:
1355 1356 if spec.startswith("shell:"):
1356 1357 # external commands should be run relative to the repo root
1357 1358 cmd = spec[6:]
1358 1359 proc = subprocess.Popen(procutil.tonativestr(cmd),
1359 1360 shell=True, bufsize=-1,
1360 1361 close_fds=procutil.closefds,
1361 1362 stdout=subprocess.PIPE,
1362 1363 cwd=procutil.tonativestr(repo.root))
1363 1364 src = proc.stdout
1364 1365 else:
1365 1366 # treat as a URL or file
1366 1367 src = url.open(repo.ui, spec)
1367 1368 for l in src:
1368 1369 if " " in l:
1369 1370 k, v = l.strip().split(" ", 1)
1370 1371 else:
1371 1372 k, v = l.strip(), ""
1372 1373
1373 1374 k = encoding.tolocal(k)
1374 1375 try:
1375 1376 data[revsingle(repo, k).rev()] = encoding.tolocal(v)
1376 1377 except (error.LookupError, error.RepoLookupError):
1377 1378 pass # we ignore data for nodes that don't exist locally
1378 1379 finally:
1379 1380 if proc:
1380 1381 proc.communicate()
1381 1382 if src:
1382 1383 src.close()
1383 1384 if proc and proc.returncode != 0:
1384 1385 raise error.Abort(_("extdata command '%s' failed: %s")
1385 1386 % (cmd, procutil.explainexit(proc.returncode)))
1386 1387
1387 1388 return data
1388 1389
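# Example [extdata] configuration (source name and command are illustrative;
# shell: commands run relative to the repo root, as noted above):
#
#   [extdata]
#   bugrefs = shell:cat .hg/bugmap
#
# where each output line is '<revspec> <freeform value>'. Then:
#
#   data = extdatasource(repo, b'bugrefs')   # -> {rev: localized value}
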
1389 1390 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1390 1391 if lock is None:
1391 1392 raise error.LockInheritanceContractViolation(
1392 1393 'lock can only be inherited while held')
1393 1394 if environ is None:
1394 1395 environ = {}
1395 1396 with lock.inherit() as locker:
1396 1397 environ[envvar] = locker
1397 1398 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1398 1399
1399 1400 def wlocksub(repo, cmd, *args, **kwargs):
1400 1401 """run cmd as a subprocess that allows inheriting repo's wlock
1401 1402
1402 1403 This can only be called while the wlock is held. This takes all the
1403 1404 arguments that ui.system does, and returns the exit code of the
1404 1405 subprocess."""
1405 1406 return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
1406 1407 **kwargs)
1407 1408
1408 1409 class progress(object):
1409 1410 def __init__(self, ui, topic, unit="", total=None):
1410 1411 self.ui = ui
1411 1412 self.pos = 0
1412 1413 self.topic = topic
1413 1414 self.unit = unit
1414 1415 self.total = total
1415 1416
1416 1417 def __enter__(self):
1417 1418 return self
1418 1419
1419 1420 def __exit__(self, exc_type, exc_value, exc_tb):
1420 1421 self.complete()
1421 1422
1422 1423 def update(self, pos, item="", total=None):
1423 1424 assert pos is not None
1424 1425 if total:
1425 1426 self.total = total
1426 1427 self.pos = pos
1427 1428 self._print(item)
1428 1429
1429 1430 def increment(self, step=1, item="", total=None):
1430 1431 self.update(self.pos + step, item, total)
1431 1432
1432 1433 def complete(self):
1433 1434 self.ui.progress(self.topic, None)
1434 1435
1435 1436 def _print(self, item):
1436 1437 self.ui.progress(self.topic, self.pos, item, self.unit,
1437 1438 self.total)
1438 1439
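# Typical use as a context manager, assuming an existing ``ui`` object;
# complete() is called automatically on exit:
#
#   with progress(ui, b'scanning', unit=b'files', total=len(files)) as prog:
#       for f in files:
#           prog.increment(item=f)
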
1439 1440 def gdinitconfig(ui):
1440 1441 """helper function to know if a repo should be created as general delta
1441 1442 """
1442 1443 # experimental config: format.generaldelta
1443 1444 return (ui.configbool('format', 'generaldelta')
1444 1445 or ui.configbool('format', 'usegeneraldelta')
1445 1446 or ui.configbool('format', 'sparse-revlog'))
1446 1447
1447 1448 def gddeltaconfig(ui):
1448 1449 """helper function to know if incoming delta should be optimised
1449 1450 """
1450 1451 # experimental config: format.generaldelta
1451 1452 return ui.configbool('format', 'generaldelta')
1452 1453
1453 1454 class simplekeyvaluefile(object):
1454 1455 """A simple file with key=value lines
1455 1456
1456 1457 Keys must be alphanumeric and start with a letter; values must not
1457 1458 contain '\n' characters"""
1458 1459 firstlinekey = '__firstline'
1459 1460
1460 1461 def __init__(self, vfs, path, keys=None):
1461 1462 self.vfs = vfs
1462 1463 self.path = path
1463 1464
1464 1465 def read(self, firstlinenonkeyval=False):
1465 1466 """Read the contents of a simple key-value file
1466 1467
1467 1468 'firstlinenonkeyval' indicates whether the first line of the file
1468 1469 should be treated as a key-value pair or returned fully under the
1469 1470 __firstline key."""
1470 1471 lines = self.vfs.readlines(self.path)
1471 1472 d = {}
1472 1473 if firstlinenonkeyval:
1473 1474 if not lines:
1474 1475 e = _("empty simplekeyvalue file")
1475 1476 raise error.CorruptedState(e)
1476 1477 # we don't want to include '\n' in the __firstline
1477 1478 d[self.firstlinekey] = lines[0][:-1]
1478 1479 del lines[0]
1479 1480
1480 1481 try:
1481 1482 # the 'if line.strip()' part prevents us from failing on empty
1482 1483 # lines which only contain '\n' and are therefore not skipped
1483 1484 # by 'if line'
1484 1485 updatedict = dict(line[:-1].split('=', 1) for line in lines
1485 1486 if line.strip())
1486 1487 if self.firstlinekey in updatedict:
1487 1488 e = _("%r can't be used as a key")
1488 1489 raise error.CorruptedState(e % self.firstlinekey)
1489 1490 d.update(updatedict)
1490 1491 except ValueError as e:
1491 1492 raise error.CorruptedState(str(e))
1492 1493 return d
1493 1494
1494 1495 def write(self, data, firstline=None):
1495 1496 """Write key=>value mapping to a file
1496 1497 data is a dict. Keys must be alphanumeric and start with a letter.
1497 1498 Values must not contain newline characters.
1498 1499
1499 1500 If 'firstline' is not None, it is written to file before
1500 1501 everything else, as it is, not in a key=value form"""
1501 1502 lines = []
1502 1503 if firstline is not None:
1503 1504 lines.append('%s\n' % firstline)
1504 1505
1505 1506 for k, v in data.items():
1506 1507 if k == self.firstlinekey:
1507 1508 e = "key name '%s' is reserved" % self.firstlinekey
1508 1509 raise error.ProgrammingError(e)
1509 1510 if not k[0:1].isalpha():
1510 1511 e = "keys must start with a letter in a key-value file"
1511 1512 raise error.ProgrammingError(e)
1512 1513 if not k.isalnum():
1513 1514 e = "invalid key name in a simple key-value file"
1514 1515 raise error.ProgrammingError(e)
1515 1516 if '\n' in v:
1516 1517 e = "invalid value in a simple key-value file"
1517 1518 raise error.ProgrammingError(e)
1518 1519 lines.append("%s=%s\n" % (k, v))
1519 1520 with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
1520 1521 fp.write(''.join(lines))
1521 1522
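# Round-trip sketch, assuming an hg vfs instance rooted at a writable
# directory (the file name and keys are illustrative):
#
#   skv = simplekeyvaluefile(somevfs, 'mystate')
#   skv.write({'version': '1'}, firstline='statefile-v1')
#   skv.read(firstlinenonkeyval=True)
#       -> {'__firstline': 'statefile-v1', 'version': '1'}
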
1522 1523 _reportobsoletedsource = [
1523 1524 'debugobsolete',
1524 1525 'pull',
1525 1526 'push',
1526 1527 'serve',
1527 1528 'unbundle',
1528 1529 ]
1529 1530
1530 1531 _reportnewcssource = [
1531 1532 'pull',
1532 1533 'unbundle',
1533 1534 ]
1534 1535
def prefetchfiles(repo, revs, match):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally before the command
    uses them."""
    if match:
        # The command itself will complain about files that don't exist, so
        # don't duplicate the message.
        match = matchmod.badmatch(match, lambda fn, msg: None)
    else:
        match = matchall(repo)

    fileprefetchhooks(repo, revs, match)

# a list of (repo, revs, match) prefetch functions
fileprefetchhooks = util.hooks()

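# Illustrative sketch (not part of the original module): an extension could
# register a prefetch function so file contents are fetched before a command
# reads them. The extension name and hook body are hypothetical.
#
#   def _prefetch(repo, revs, match):
#       for rev in revs:
#           ctx = repo[rev]
#           for path in ctx.walk(match):
#               pass  # warm the extension's file store here
#   fileprefetchhooks.add('myextension', _prefetch)
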
# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True

def registersummarycallback(repo, otr, txnname=''):
    """register a callback to issue a summary after the transaction is closed
    """
    def txmatch(sources):
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than that of the actual
        # underlying repository, so the filtered object can die before the
        # weakref is used, leading to trouble. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())
        def wrapped(tr):
            repo = reporef()
            if filtername:
                repo = repo.filtered(filtername)
            func(repo, tr)
        newcat = '%02i-txnreport' % len(categories)
        otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    if txmatch(_reportobsoletedsource):
        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            if obsoleted:
                repo.ui.status(_('obsoleted %i changesets\n')
                               % len(obsoleted))

    if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
        repo.ui.configbool('experimental', 'evolution.report-instabilities')):
        instabilitytypes = [
            ('orphan', 'orphan'),
            ('phase-divergent', 'phasedivergent'),
            ('content-divergent', 'contentdivergent'),
        ]

        def getinstabilitycounts(repo):
            filtered = repo.changelog.filteredrevs
            counts = {}
            for instability, revset in instabilitytypes:
                counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
                                          filtered)
            return counts

        oldinstabilitycounts = getinstabilitycounts(repo)
        @reportsummary
        def reportnewinstabilities(repo, tr):
            newinstabilitycounts = getinstabilitycounts(repo)
            for instability, revset in instabilitytypes:
                delta = (newinstabilitycounts[instability] -
                         oldinstabilitycounts[instability])
                msg = getinstabilitymessage(delta, instability)
                if msg:
                    repo.ui.warn(msg)

    if txmatch(_reportnewcssource):
        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get('origrepolen', len(repo))
            unfi = repo.unfiltered()
            if origrepolen >= len(unfi):
                return

            # Compute the bounds of the new visible revisions' range.
            revs = smartset.spanset(repo, start=origrepolen)
            if revs:
                minrev, maxrev = repo[revs.min()], repo[revs.max()]

                if minrev == maxrev:
                    revrange = minrev
                else:
                    revrange = '%s:%s' % (minrev, maxrev)
                draft = len(repo.revs('%ld and draft()', revs))
                secret = len(repo.revs('%ld and secret()', revs))
                if not (draft or secret):
                    msg = _('new changesets %s\n') % revrange
                elif draft and secret:
                    msg = _('new changesets %s (%d drafts, %d secrets)\n')
                    msg %= (revrange, draft, secret)
                elif draft:
                    msg = _('new changesets %s (%d drafts)\n')
                    msg %= (revrange, draft)
                elif secret:
                    msg = _('new changesets %s (%d secrets)\n')
                    msg %= (revrange, secret)
                else:
                    errormsg = 'entered unreachable condition'
                    raise error.ProgrammingError(errormsg)
                repo.ui.status(msg)

            # search for new changesets directly pulled as obsolete
            duplicates = tr.changes.get('revduplicates', ())
            obsadded = unfi.revs('(%d: + %ld) and obsolete()',
                                 origrepolen, duplicates)
            cl = repo.changelog
            extinctadded = [r for r in obsadded if r not in cl]
            if extinctadded:
                # They are not just obsolete, but obsolete and invisible;
                # we call them "extinct" internally, but the term has not
                # been exposed to users.
                msg = '(%d other changesets obsolete on arrival)\n'
                repo.ui.status(msg % len(extinctadded))

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets that
            pre-existed the pull/unbundle.
            """
            origrepolen = tr.changes.get('origrepolen', len(repo))
            phasetracking = tr.changes.get('phases', {})
            if not phasetracking:
                return
            published = [
                rev for rev, (old, new) in phasetracking.iteritems()
                if new == phases.public and rev < origrepolen
            ]
            if not published:
                return
            repo.ui.status(_('%d local changesets published\n')
                           % len(published))

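# Illustrative sketch (not part of the original module): how a caller could
# wire the summary reports onto a transaction; in practice localrepo does
# this itself when it opens a transaction. The transaction name decides
# which reports fire.
#
#   tr = repo.transaction('unbundle')
#   registersummarycallback(repo, tr, txnname='unbundle')
#   # ... apply changes; the report callbacks run when tr closes
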
def getinstabilitymessage(delta, instability):
    """function to return the message to show warning about new instabilities

    exists as a separate function so that extensions can wrap it to show more
    information, like how to fix instabilities"""
    if delta > 0:
        return _('%i new %s changesets\n') % (delta, instability)

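# For example, getinstabilitymessage(3, 'orphan') returns
# '3 new orphan changesets\n'; for a non-positive delta it returns None,
# so no warning is emitted.
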
def nodesummaries(repo, nodes, maxnumnodes=4):
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return ' '.join(short(h) for h in nodes)
    first = ' '.join(short(h) for h in nodes[:maxnumnodes])
    return _("%s and %d others") % (first, len(nodes) - maxnumnodes)

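# For example, with six nodes, the default maxnumnodes=4 and a non-verbose
# ui, this yields the first four short hashes followed by 'and 2 others'.
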
def enforcesinglehead(repo, tr, desc):
    """check that no named branch has multiple heads"""
    if desc in ('strip', 'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered('visible')
    # possible improvement: we could restrict the check to the affected
    # branches
    for name, heads in visible.branchmap().iteritems():
        if len(heads) > 1:
            msg = _('rejecting multiple heads on branch "%s"')
            msg %= name
            hint = _('%d heads: %s')
            hint %= (len(heads), nodesummaries(repo, heads))
            raise error.Abort(msg, hint=hint)

def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally
    loaded.
    """
    return sink

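# Illustrative sketch (not part of the original module): an extension could
# wrap this hook point to decorate every sink the convert machinery creates
# ('mysink' is a hypothetical wrapper class):
#
#   from mercurial import extensions, scmutil
#   def _wrapsink(orig, sink):
#       return mysink(orig(sink))
#   extensions.wrapfunction(scmutil, 'wrapconvertsink', _wrapsink)
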
def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    if not repo.filtername or not repo.ui.configbool('experimental',
                                                     'directaccess'):
        return repo

    if repo.filtername not in ('visible', 'visible-hidden'):
        return repo

    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError:  # will be reported by scmutil.revrange()
            continue

        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)

    if not revs:
        return repo

    if hiddentype == 'warn':
        unfi = repo.unfiltered()
        revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
        repo.ui.warn(_("warning: accessing hidden changesets for write "
                       "operation: %s\n") % revstr)

    # we have to use a new filtername to separate branch/tags caches until we
    # can disable these caches when revisions are dynamically pinned.
    return repo.filtered('visible-hidden', revs)

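# Illustrative sketch (not part of the original module): a command that
# accepts user-supplied revision specs could unhide hash-like specs before
# resolving them (the spec value is hypothetical):
#
#   specs = ['ffffaaaabbbb']
#   repo = unhidehashlikerevs(repo, specs, 'warn')
#   revs = revrange(repo, specs)  # revrange() is defined earlier in scmutil
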
def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and return a set of revision numbers of
    hidden changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
    for s in symbols:
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    if n not in cl:
                        revs.add(n)
                    continue
        except ValueError:
            pass

        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs

def bookmarkrevs(repo, mark):
    """
    Select revisions reachable by a given bookmark
    """
    return repo.revs("ancestors(bookmark(%s)) - "
                     "ancestors(head() and not bookmark(%s)) - "
                     "ancestors(bookmark() and not bookmark(%s))",
                     mark, mark, mark)
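
# Illustrative sketch (not part of the original module): the revset above
# selects the changesets "owned" by a bookmark, excluding history shared
# with other heads and bookmarks (bookmark name hypothetical):
#
#   for rev in bookmarkrevs(repo, 'feature-x'):
#       repo.ui.write("%d\n" % rev)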