requires: use atomictemp=True when writing .hg/requires...
Martin von Zweigbergk
r40669:acd17caa default
@@ -1,1800 +1,1800 @@
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import hashlib
13 13 import os
14 14 import re
15 15 import socket
16 16 import subprocess
17 17 import weakref
18 18
19 19 from .i18n import _
20 20 from .node import (
21 21 bin,
22 22 hex,
23 23 nullid,
24 24 nullrev,
25 25 short,
26 26 wdirid,
27 27 wdirrev,
28 28 )
29 29
30 30 from . import (
31 31 encoding,
32 32 error,
33 33 match as matchmod,
34 34 obsolete,
35 35 obsutil,
36 36 pathutil,
37 37 phases,
38 38 policy,
39 39 pycompat,
40 40 revsetlang,
41 41 similar,
42 42 smartset,
43 43 url,
44 44 util,
45 45 vfs,
46 46 )
47 47
48 48 from .utils import (
49 49 procutil,
50 50 stringutil,
51 51 )
52 52
53 53 if pycompat.iswindows:
54 54 from . import scmwindows as scmplatform
55 55 else:
56 56 from . import scmposix as scmplatform
57 57
58 58 parsers = policy.importmod(r'parsers')
59 59
60 60 termsize = scmplatform.termsize
61 61
62 62 class status(tuple):
63 63 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
64 64 and 'ignored' properties are only relevant to the working copy.
65 65 '''
66 66
67 67 __slots__ = ()
68 68
69 69 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
70 70 clean):
71 71 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
72 72 ignored, clean))
73 73
74 74 @property
75 75 def modified(self):
76 76 '''files that have been modified'''
77 77 return self[0]
78 78
79 79 @property
80 80 def added(self):
81 81 '''files that have been added'''
82 82 return self[1]
83 83
84 84 @property
85 85 def removed(self):
86 86 '''files that have been removed'''
87 87 return self[2]
88 88
89 89 @property
90 90 def deleted(self):
91 91 '''files that are in the dirstate, but have been deleted from the
92 92 working copy (aka "missing")
93 93 '''
94 94 return self[3]
95 95
96 96 @property
97 97 def unknown(self):
98 98 '''files not in the dirstate that are not ignored'''
99 99 return self[4]
100 100
101 101 @property
102 102 def ignored(self):
103 103 '''files not in the dirstate that are ignored (by _dirignore())'''
104 104 return self[5]
105 105
106 106 @property
107 107 def clean(self):
108 108 '''files that have not been modified'''
109 109 return self[6]
110 110
111 111 def __repr__(self, *args, **kwargs):
112 112 return ((r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
113 113 r'unknown=%s, ignored=%s, clean=%s>') %
114 114 tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self))
115 115
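
For illustration: status is an ordinary 7-tuple with named accessors, so callers can unpack it positionally or read the properties. A minimal sketch, assuming a Mercurial checkout is importable:

    from mercurial import scmutil

    st = scmutil.status([b'changed.c'], [], [], [], [], [], [b'same.c'])
    assert st.modified == [b'changed.c']   # property access
    modified, added = st[0], st[1]         # or plain tuple indexing
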
116 116 def itersubrepos(ctx1, ctx2):
117 117 """find subrepos in ctx1 or ctx2"""
118 118 # Create a (subpath, ctx) mapping where we prefer subpaths from
119 119 # ctx1. The subpaths from ctx2 are important when the .hgsub file
120 120 # has been modified (in ctx2) but not yet committed (in ctx1).
121 121 subpaths = dict.fromkeys(ctx2.substate, ctx2)
122 122 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
123 123
124 124 missing = set()
125 125
126 126 for subpath in ctx2.substate:
127 127 if subpath not in ctx1.substate:
128 128 del subpaths[subpath]
129 129 missing.add(subpath)
130 130
131 131 for subpath, ctx in sorted(subpaths.iteritems()):
132 132 yield subpath, ctx.sub(subpath)
133 133
134 134 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
135 135 # status and diff will have an accurate result when it does
136 136 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
137 137 # against itself.
138 138 for subpath in missing:
139 139 yield subpath, ctx2.nullsub(subpath, ctx1)
140 140
141 141 def nochangesfound(ui, repo, excluded=None):
142 142 '''Report no changes for push/pull, excluded is None or a list of
143 143 nodes excluded from the push/pull.
144 144 '''
145 145 secretlist = []
146 146 if excluded:
147 147 for n in excluded:
148 148 ctx = repo[n]
149 149 if ctx.phase() >= phases.secret and not ctx.extinct():
150 150 secretlist.append(n)
151 151
152 152 if secretlist:
153 153 ui.status(_("no changes found (ignored %d secret changesets)\n")
154 154 % len(secretlist))
155 155 else:
156 156 ui.status(_("no changes found\n"))
157 157
158 158 def callcatch(ui, func):
159 159 """call func() with global exception handling
160 160
161 161 return func() if no exception happens. otherwise do some error handling
162 162 and return an exit code accordingly. does not handle all exceptions.
163 163 """
164 164 try:
165 165 try:
166 166 return func()
167 167 except: # re-raises
168 168 ui.traceback()
169 169 raise
170 170 # Global exception handling, alphabetically
171 171 # Mercurial-specific first, followed by built-in and library exceptions
172 172 except error.LockHeld as inst:
173 173 if inst.errno == errno.ETIMEDOUT:
174 174 reason = _('timed out waiting for lock held by %r') % (
175 175 pycompat.bytestr(inst.locker))
176 176 else:
177 177 reason = _('lock held by %r') % inst.locker
178 178 ui.error(_("abort: %s: %s\n") % (
179 179 inst.desc or stringutil.forcebytestr(inst.filename), reason))
180 180 if not inst.locker:
181 181 ui.error(_("(lock might be very busy)\n"))
182 182 except error.LockUnavailable as inst:
183 183 ui.error(_("abort: could not lock %s: %s\n") %
184 184 (inst.desc or stringutil.forcebytestr(inst.filename),
185 185 encoding.strtolocal(inst.strerror)))
186 186 except error.OutOfBandError as inst:
187 187 if inst.args:
188 188 msg = _("abort: remote error:\n")
189 189 else:
190 190 msg = _("abort: remote error\n")
191 191 ui.error(msg)
192 192 if inst.args:
193 193 ui.error(''.join(inst.args))
194 194 if inst.hint:
195 195 ui.error('(%s)\n' % inst.hint)
196 196 except error.RepoError as inst:
197 197 ui.error(_("abort: %s!\n") % inst)
198 198 if inst.hint:
199 199 ui.error(_("(%s)\n") % inst.hint)
200 200 except error.ResponseError as inst:
201 201 ui.error(_("abort: %s") % inst.args[0])
202 202 msg = inst.args[1]
203 203 if isinstance(msg, type(u'')):
204 204 msg = pycompat.sysbytes(msg)
205 205 if not isinstance(msg, bytes):
206 206 ui.error(" %r\n" % (msg,))
207 207 elif not msg:
208 208 ui.error(_(" empty string\n"))
209 209 else:
210 210 ui.error("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
211 211 except error.CensoredNodeError as inst:
212 212 ui.error(_("abort: file censored %s!\n") % inst)
213 213 except error.StorageError as inst:
214 214 ui.error(_("abort: %s!\n") % inst)
215 215 except error.InterventionRequired as inst:
216 216 ui.error("%s\n" % inst)
217 217 if inst.hint:
218 218 ui.error(_("(%s)\n") % inst.hint)
219 219 return 1
220 220 except error.WdirUnsupported:
221 221 ui.error(_("abort: working directory revision cannot be specified\n"))
222 222 except error.Abort as inst:
223 223 ui.error(_("abort: %s\n") % inst)
224 224 if inst.hint:
225 225 ui.error(_("(%s)\n") % inst.hint)
226 226 except ImportError as inst:
227 227 ui.error(_("abort: %s!\n") % stringutil.forcebytestr(inst))
228 228 m = stringutil.forcebytestr(inst).split()[-1]
229 229 if m in "mpatch bdiff".split():
230 230 ui.error(_("(did you forget to compile extensions?)\n"))
231 231 elif m in "zlib".split():
232 232 ui.error(_("(is your Python install correct?)\n"))
233 233 except IOError as inst:
234 234 if util.safehasattr(inst, "code"):
235 235 ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst))
236 236 elif util.safehasattr(inst, "reason"):
237 237 try: # usually it is in the form (errno, strerror)
238 238 reason = inst.reason.args[1]
239 239 except (AttributeError, IndexError):
240 240 # it might be anything, for example a string
241 241 reason = inst.reason
242 242 if isinstance(reason, pycompat.unicode):
243 243 # SSLError of Python 2.7.9 contains a unicode
244 244 reason = encoding.unitolocal(reason)
245 245 ui.error(_("abort: error: %s\n") % reason)
246 246 elif (util.safehasattr(inst, "args")
247 247 and inst.args and inst.args[0] == errno.EPIPE):
248 248 pass
249 249 elif getattr(inst, "strerror", None):
250 250 if getattr(inst, "filename", None):
251 251 ui.error(_("abort: %s: %s\n") % (
252 252 encoding.strtolocal(inst.strerror),
253 253 stringutil.forcebytestr(inst.filename)))
254 254 else:
255 255 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
256 256 else:
257 257 raise
258 258 except OSError as inst:
259 259 if getattr(inst, "filename", None) is not None:
260 260 ui.error(_("abort: %s: '%s'\n") % (
261 261 encoding.strtolocal(inst.strerror),
262 262 stringutil.forcebytestr(inst.filename)))
263 263 else:
264 264 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
265 265 except MemoryError:
266 266 ui.error(_("abort: out of memory\n"))
267 267 except SystemExit as inst:
268 268 # Commands shouldn't sys.exit directly, but give a return code.
269 269 # Just in case, catch this and pass the exit code to the caller.
270 270 return inst.code
271 271 except socket.error as inst:
272 272 ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))
273 273
274 274 return -1
275 275
276 276 def checknewlabel(repo, lbl, kind):
277 277 # Do not use the "kind" parameter in ui output.
278 278 # It makes strings difficult to translate.
279 279 if lbl in ['tip', '.', 'null']:
280 280 raise error.Abort(_("the name '%s' is reserved") % lbl)
281 281 for c in (':', '\0', '\n', '\r'):
282 282 if c in lbl:
283 283 raise error.Abort(
284 284 _("%r cannot be used in a name") % pycompat.bytestr(c))
285 285 try:
286 286 int(lbl)
287 287 raise error.Abort(_("cannot use an integer as a name"))
288 288 except ValueError:
289 289 pass
290 290 if lbl.strip() != lbl:
291 291 raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
292 292
293 293 def checkfilename(f):
294 294 '''Check that the filename f is an acceptable filename for a tracked file'''
295 295 if '\r' in f or '\n' in f:
296 296 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
297 297 % pycompat.bytestr(f))
298 298
299 299 def checkportable(ui, f):
300 300 '''Check if filename f is portable and warn or abort depending on config'''
301 301 checkfilename(f)
302 302 abort, warn = checkportabilityalert(ui)
303 303 if abort or warn:
304 304 msg = util.checkwinfilename(f)
305 305 if msg:
306 306 msg = "%s: %s" % (msg, procutil.shellquote(f))
307 307 if abort:
308 308 raise error.Abort(msg)
309 309 ui.warn(_("warning: %s\n") % msg)
310 310
311 311 def checkportabilityalert(ui):
312 312 '''check if the user's config requests nothing, a warning, or abort for
313 313 non-portable filenames'''
314 314 val = ui.config('ui', 'portablefilenames')
315 315 lval = val.lower()
316 316 bval = stringutil.parsebool(val)
317 317 abort = pycompat.iswindows or lval == 'abort'
318 318 warn = bval or lval == 'warn'
319 319 if bval is None and not (warn or abort or lval == 'ignore'):
320 320 raise error.ConfigError(
321 321 _("ui.portablefilenames value is invalid ('%s')") % val)
322 322 return abort, warn
323 323
324 324 class casecollisionauditor(object):
325 325 def __init__(self, ui, abort, dirstate):
326 326 self._ui = ui
327 327 self._abort = abort
328 328 allfiles = '\0'.join(dirstate._map)
329 329 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
330 330 self._dirstate = dirstate
331 331 # The purpose of _newfiles is so that we don't complain about
332 332 # case collisions if someone were to call this object with the
333 333 # same filename twice.
334 334 self._newfiles = set()
335 335
336 336 def __call__(self, f):
337 337 if f in self._newfiles:
338 338 return
339 339 fl = encoding.lower(f)
340 340 if fl in self._loweredfiles and f not in self._dirstate:
341 341 msg = _('possible case-folding collision for %s') % f
342 342 if self._abort:
343 343 raise error.Abort(msg)
344 344 self._ui.warn(_("warning: %s\n") % msg)
345 345 self._loweredfiles.add(fl)
346 346 self._newfiles.add(f)
347 347
348 348 def filteredhash(repo, maxrev):
349 349 """build hash of filtered revisions in the current repoview.
350 350
351 351 Multiple caches perform up-to-date validation by checking that the
352 352 tiprev and tipnode stored in the cache file match the current repository.
353 353 However, this is not sufficient for validating repoviews because the set
354 354 of revisions in the view may change without the repository tiprev and
355 355 tipnode changing.
356 356
357 357 This function hashes all the revs filtered from the view and returns
358 358 that SHA-1 digest.
359 359 """
360 360 cl = repo.changelog
361 361 if not cl.filteredrevs:
362 362 return None
363 363 key = None
364 364 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
365 365 if revs:
366 366 s = hashlib.sha1()
367 367 for rev in revs:
368 368 s.update('%d;' % rev)
369 369 key = s.digest()
370 370 return key
371 371
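
The digest scheme above is easy to reproduce standalone; a sketch of the same computation outside Mercurial (filteredrevs is any set of integers):

    import hashlib

    def filteredhash_sketch(filteredrevs, maxrev):
        # hash every filtered rev <= maxrev, ascending, as "<rev>;"
        revs = sorted(r for r in filteredrevs if r <= maxrev)
        if not revs:
            return None
        s = hashlib.sha1()
        for rev in revs:
            s.update(b'%d;' % rev)
        return s.digest()
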
372 372 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
373 373 '''yield every hg repository under path, always recursively.
374 374 The recurse flag will only control recursion into repo working dirs'''
375 375 def errhandler(err):
376 376 if err.filename == path:
377 377 raise err
378 378 samestat = getattr(os.path, 'samestat', None)
379 379 if followsym and samestat is not None:
380 380 def adddir(dirlst, dirname):
381 381 dirstat = os.stat(dirname)
382 382 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
383 383 if not match:
384 384 dirlst.append(dirstat)
385 385 return not match
386 386 else:
387 387 followsym = False
388 388
389 389 if (seen_dirs is None) and followsym:
390 390 seen_dirs = []
391 391 adddir(seen_dirs, path)
392 392 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
393 393 dirs.sort()
394 394 if '.hg' in dirs:
395 395 yield root # found a repository
396 396 qroot = os.path.join(root, '.hg', 'patches')
397 397 if os.path.isdir(os.path.join(qroot, '.hg')):
398 398 yield qroot # we have a patch queue repo here
399 399 if recurse:
400 400 # avoid recursing inside the .hg directory
401 401 dirs.remove('.hg')
402 402 else:
403 403 dirs[:] = [] # don't descend further
404 404 elif followsym:
405 405 newdirs = []
406 406 for d in dirs:
407 407 fname = os.path.join(root, d)
408 408 if adddir(seen_dirs, fname):
409 409 if os.path.islink(fname):
410 410 for hgname in walkrepos(fname, True, seen_dirs):
411 411 yield hgname
412 412 else:
413 413 newdirs.append(d)
414 414 dirs[:] = newdirs
415 415
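
Typical use is a scan over a directory tree; a sketch (the path is hypothetical):

    from mercurial import scmutil

    # yields each repo root, plus any patch-queue repo in .hg/patches
    for repopath in scmutil.walkrepos(b'/srv/repos', followsym=True):
        print(repopath)
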
416 416 def binnode(ctx):
417 417 """Return binary node id for a given basectx"""
418 418 node = ctx.node()
419 419 if node is None:
420 420 return wdirid
421 421 return node
422 422
423 423 def intrev(ctx):
424 424 """Return integer for a given basectx that can be used in comparison or
425 425 arithmetic operation"""
426 426 rev = ctx.rev()
427 427 if rev is None:
428 428 return wdirrev
429 429 return rev
430 430
431 431 def formatchangeid(ctx):
432 432 """Format changectx as '{rev}:{node|formatnode}', which is the default
433 433 template provided by logcmdutil.changesettemplater"""
434 434 repo = ctx.repo()
435 435 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
436 436
437 437 def formatrevnode(ui, rev, node):
438 438 """Format given revision and node depending on the current verbosity"""
439 439 if ui.debugflag:
440 440 hexfunc = hex
441 441 else:
442 442 hexfunc = short
443 443 return '%d:%s' % (rev, hexfunc(node))
444 444
445 445 def resolvehexnodeidprefix(repo, prefix):
446 446 if (prefix.startswith('x') and
447 447 repo.ui.configbool('experimental', 'revisions.prefixhexnode')):
448 448 prefix = prefix[1:]
449 449 try:
450 450 # Uses unfiltered repo because it's faster when prefix is ambiguous.
451 451 # This matches the shortesthexnodeidprefix() function below.
452 452 node = repo.unfiltered().changelog._partialmatch(prefix)
453 453 except error.AmbiguousPrefixLookupError:
454 454 revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
455 455 if revset:
456 456 # Clear config to avoid infinite recursion
457 457 configoverrides = {('experimental',
458 458 'revisions.disambiguatewithin'): None}
459 459 with repo.ui.configoverride(configoverrides):
460 460 revs = repo.anyrevs([revset], user=True)
461 461 matches = []
462 462 for rev in revs:
463 463 node = repo.changelog.node(rev)
464 464 if hex(node).startswith(prefix):
465 465 matches.append(node)
466 466 if len(matches) == 1:
467 467 return matches[0]
468 468 raise
469 469 if node is None:
470 470 return
471 471 repo.changelog.rev(node) # make sure node isn't filtered
472 472 return node
473 473
474 474 def mayberevnum(repo, prefix):
475 475 """Checks if the given prefix may be mistaken for a revision number"""
476 476 try:
477 477 i = int(prefix)
478 478 # a pure int that starts with a zero can never be mistaken for a
479 479 # rev; nor, obviously, can an int larger than the value of the
480 480 # tip rev. We still need to disambiguate if
481 481 # prefix == '0', since that *is* a valid revnum.
482 482 if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
483 483 return False
484 484 return True
485 485 except ValueError:
486 486 return False
487 487
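
A sketch of the rule above; only len(repo) is consulted, so a stub with 100 revisions is enough to exercise it:

    from mercurial import scmutil

    class fakerepo(object):
        def __len__(self):
            return 100      # pretend revs 0..99 exist

    repo = fakerepo()
    assert scmutil.mayberevnum(repo, b'0')        # '0' is a valid revnum
    assert scmutil.mayberevnum(repo, b'42')
    assert not scmutil.mayberevnum(repo, b'042')  # leading zero
    assert not scmutil.mayberevnum(repo, b'250')  # beyond the tip rev
    assert not scmutil.mayberevnum(repo, b'abc')  # not an integer
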
488 488 def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
489 489 """Find the shortest unambiguous prefix that matches hexnode.
490 490
491 491 If "cache" is not None, it must be a dictionary that can be used for
492 492 caching between calls to this method.
493 493 """
494 494 # _partialmatch() of filtered changelog could take O(len(repo)) time,
495 495 # which would be unacceptably slow. so we look for hash collision in
496 496 # unfiltered space, which means some hashes may be slightly longer.
497 497
498 498 minlength = max(minlength, 1)
499 499
500 500 def disambiguate(prefix):
501 501 """Disambiguate against revnums."""
502 502 if repo.ui.configbool('experimental', 'revisions.prefixhexnode'):
503 503 if mayberevnum(repo, prefix):
504 504 return 'x' + prefix
505 505 else:
506 506 return prefix
507 507
508 508 hexnode = hex(node)
509 509 for length in range(len(prefix), len(hexnode) + 1):
510 510 prefix = hexnode[:length]
511 511 if not mayberevnum(repo, prefix):
512 512 return prefix
513 513
514 514 cl = repo.unfiltered().changelog
515 515 revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
516 516 if revset:
517 517 revs = None
518 518 if cache is not None:
519 519 revs = cache.get('disambiguationrevset')
520 520 if revs is None:
521 521 revs = repo.anyrevs([revset], user=True)
522 522 if cache is not None:
523 523 cache['disambiguationrevset'] = revs
524 524 if cl.rev(node) in revs:
525 525 hexnode = hex(node)
526 526 nodetree = None
527 527 if cache is not None:
528 528 nodetree = cache.get('disambiguationnodetree')
529 529 if not nodetree:
530 530 try:
531 531 nodetree = parsers.nodetree(cl.index, len(revs))
532 532 except AttributeError:
533 533 # no native nodetree
534 534 pass
535 535 else:
536 536 for r in revs:
537 537 nodetree.insert(r)
538 538 if cache is not None:
539 539 cache['disambiguationnodetree'] = nodetree
540 540 if nodetree is not None:
541 541 length = max(nodetree.shortest(node), minlength)
542 542 prefix = hexnode[:length]
543 543 return disambiguate(prefix)
544 544 for length in range(minlength, len(hexnode) + 1):
545 545 matches = []
546 546 prefix = hexnode[:length]
547 547 for rev in revs:
548 548 otherhexnode = repo[rev].hex()
549 549 if prefix == otherhexnode[:length]:
550 550 matches.append(otherhexnode)
551 551 if len(matches) == 1:
552 552 return disambiguate(prefix)
553 553
554 554 try:
555 555 return disambiguate(cl.shortest(node, minlength))
556 556 except error.LookupError:
557 557 raise error.RepoLookupError()
558 558
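
Usage sketch (repo is assumed to be an existing localrepository): passing the same dict as cache across many calls amortizes the disambiguation revset and nodetree.

    from mercurial import scmutil

    cache = {}
    for ctx in repo.set(b'draft()'):
        node = ctx.node()
        print(scmutil.shortesthexnodeidprefix(repo, node, minlength=4,
                                              cache=cache))
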
559 559 def isrevsymbol(repo, symbol):
560 560 """Checks if a symbol exists in the repo.
561 561
562 562 See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
563 563 symbol is an ambiguous nodeid prefix.
564 564 """
565 565 try:
566 566 revsymbol(repo, symbol)
567 567 return True
568 568 except error.RepoLookupError:
569 569 return False
570 570
571 571 def revsymbol(repo, symbol):
572 572 """Returns a context given a single revision symbol (as string).
573 573
574 574 This is similar to revsingle(), but accepts only a single revision symbol,
575 575 i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
576 576 not "max(public())".
577 577 """
578 578 if not isinstance(symbol, bytes):
579 579 msg = ("symbol (%s of type %s) was not a string, did you mean "
580 580 "repo[symbol]?" % (symbol, type(symbol)))
581 581 raise error.ProgrammingError(msg)
582 582 try:
583 583 if symbol in ('.', 'tip', 'null'):
584 584 return repo[symbol]
585 585
586 586 try:
587 587 r = int(symbol)
588 588 if '%d' % r != symbol:
589 589 raise ValueError
590 590 l = len(repo.changelog)
591 591 if r < 0:
592 592 r += l
593 593 if r < 0 or r >= l and r != wdirrev:
594 594 raise ValueError
595 595 return repo[r]
596 596 except error.FilteredIndexError:
597 597 raise
598 598 except (ValueError, OverflowError, IndexError):
599 599 pass
600 600
601 601 if len(symbol) == 40:
602 602 try:
603 603 node = bin(symbol)
604 604 rev = repo.changelog.rev(node)
605 605 return repo[rev]
606 606 except error.FilteredLookupError:
607 607 raise
608 608 except (TypeError, LookupError):
609 609 pass
610 610
611 611 # look up bookmarks through the name interface
612 612 try:
613 613 node = repo.names.singlenode(repo, symbol)
614 614 rev = repo.changelog.rev(node)
615 615 return repo[rev]
616 616 except KeyError:
617 617 pass
618 618
619 619 node = resolvehexnodeidprefix(repo, symbol)
620 620 if node is not None:
621 621 rev = repo.changelog.rev(node)
622 622 return repo[rev]
623 623
624 624 raise error.RepoLookupError(_("unknown revision '%s'") % symbol)
625 625
626 626 except error.WdirUnsupported:
627 627 return repo[None]
628 628 except (error.FilteredIndexError, error.FilteredLookupError,
629 629 error.FilteredRepoLookupError):
630 630 raise _filterederror(repo, symbol)
631 631
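
Usage sketch (repo assumed): revsymbol() resolves exactly one symbol; full revset expressions belong in revrange()/revsingle(). isrevsymbol() above is the non-raising probe.

    from mercurial import scmutil

    ctx = scmutil.revsymbol(repo, b'tip')
    ctx = scmutil.revsymbol(repo, b'.')
    if scmutil.isrevsymbol(repo, b'my-bookmark'):    # hypothetical bookmark
        ctx = scmutil.revsymbol(repo, b'my-bookmark')
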
632 632 def _filterederror(repo, changeid):
633 633 """build an exception to be raised about a filtered changeid
634 634
635 635 This is extracted in a function to help extensions (eg: evolve) to
636 636 experiment with various message variants."""
637 637 if repo.filtername.startswith('visible'):
638 638
639 639 # Check if the changeset is obsolete
640 640 unfilteredrepo = repo.unfiltered()
641 641 ctx = revsymbol(unfilteredrepo, changeid)
642 642
643 643 # If the changeset is obsolete, enrich the message with the reason
644 644 # that made this changeset not visible
645 645 if ctx.obsolete():
646 646 msg = obsutil._getfilteredreason(repo, changeid, ctx)
647 647 else:
648 648 msg = _("hidden revision '%s'") % changeid
649 649
650 650 hint = _('use --hidden to access hidden revisions')
651 651
652 652 return error.FilteredRepoLookupError(msg, hint=hint)
653 653 msg = _("filtered revision '%s' (not in '%s' subset)")
654 654 msg %= (changeid, repo.filtername)
655 655 return error.FilteredRepoLookupError(msg)
656 656
657 657 def revsingle(repo, revspec, default='.', localalias=None):
658 658 if not revspec and revspec != 0:
659 659 return repo[default]
660 660
661 661 l = revrange(repo, [revspec], localalias=localalias)
662 662 if not l:
663 663 raise error.Abort(_('empty revision set'))
664 664 return repo[l.last()]
665 665
666 666 def _pairspec(revspec):
667 667 tree = revsetlang.parse(revspec)
668 668 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
669 669
670 670 def revpair(repo, revs):
671 671 if not revs:
672 672 return repo['.'], repo[None]
673 673
674 674 l = revrange(repo, revs)
675 675
676 676 if not l:
677 677 first = second = None
678 678 elif l.isascending():
679 679 first = l.min()
680 680 second = l.max()
681 681 elif l.isdescending():
682 682 first = l.max()
683 683 second = l.min()
684 684 else:
685 685 first = l.first()
686 686 second = l.last()
687 687
688 688 if first is None:
689 689 raise error.Abort(_('empty revision range'))
690 690 if (first == second and len(revs) >= 2
691 691 and not all(revrange(repo, [r]) for r in revs)):
692 692 raise error.Abort(_('empty revision on one side of range'))
693 693
694 694 # if top-level is range expression, the result must always be a pair
695 695 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
696 696 return repo[first], repo[None]
697 697
698 698 return repo[first], repo[second]
699 699
700 700 def revrange(repo, specs, localalias=None):
701 701 """Execute 1 to many revsets and return the union.
702 702
703 703 This is the preferred mechanism for executing revsets using user-specified
704 704 config options, such as revset aliases.
705 705
706 706 The revsets specified by ``specs`` will be executed via a chained ``OR``
707 707 expression. If ``specs`` is empty, an empty result is returned.
708 708
709 709 ``specs`` can contain integers, in which case they are assumed to be
710 710 revision numbers.
711 711
712 712 It is assumed the revsets are already formatted. If you have arguments
713 713 that need to be expanded in the revset, call ``revsetlang.formatspec()``
714 714 and pass the result as an element of ``specs``.
715 715
716 716 Specifying a single revset is allowed.
717 717
718 718 Returns a ``revset.abstractsmartset`` which is a list-like interface over
719 719 integer revisions.
720 720 """
721 721 allspecs = []
722 722 for spec in specs:
723 723 if isinstance(spec, int):
724 724 spec = revsetlang.formatspec('rev(%d)', spec)
725 725 allspecs.append(spec)
726 726 return repo.anyrevs(allspecs, user=True, localalias=localalias)
727 727
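
Usage sketch (repo assumed): user arguments are pre-formatted with revsetlang.formatspec() so they are quoted safely, then the specs are unioned:

    from mercurial import revsetlang, scmutil

    spec = revsetlang.formatspec(b'ancestors(%s)', b'stable')
    revs = scmutil.revrange(repo, [spec, b'draft()'])
    for rev in revs:        # the smartset iterates integer revisions
        print(rev)
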
728 728 def meaningfulparents(repo, ctx):
729 729 """Return list of meaningful (or all if debug) parentrevs for rev.
730 730
731 731 For merges (two non-nullrev revisions) both parents are meaningful.
732 732 Otherwise the first parent revision is considered meaningful if it
733 733 is not the preceding revision.
734 734 """
735 735 parents = ctx.parents()
736 736 if len(parents) > 1:
737 737 return parents
738 738 if repo.ui.debugflag:
739 739 return [parents[0], repo[nullrev]]
740 740 if parents[0].rev() >= intrev(ctx) - 1:
741 741 return []
742 742 return parents
743 743
744 744 def expandpats(pats):
745 745 '''Expand bare globs when running on windows.
746 746 On posix we assume it has already been done by sh.'''
747 747 if not util.expandglobs:
748 748 return list(pats)
749 749 ret = []
750 750 for kindpat in pats:
751 751 kind, pat = matchmod._patsplit(kindpat, None)
752 752 if kind is None:
753 753 try:
754 754 globbed = glob.glob(pat)
755 755 except re.error:
756 756 globbed = [pat]
757 757 if globbed:
758 758 ret.extend(globbed)
759 759 continue
760 760 ret.append(kindpat)
761 761 return ret
762 762
763 763 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
764 764 badfn=None):
765 765 '''Return a matcher and the patterns that were used.
766 766 The matcher will warn about bad matches, unless an alternate badfn callback
767 767 is provided.'''
768 768 if pats == ("",):
769 769 pats = []
770 770 if opts is None:
771 771 opts = {}
772 772 if not globbed and default == 'relpath':
773 773 pats = expandpats(pats or [])
774 774
775 775 def bad(f, msg):
776 776 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
777 777
778 778 if badfn is None:
779 779 badfn = bad
780 780
781 781 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
782 782 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
783 783
784 784 if m.always():
785 785 pats = []
786 786 return m, pats
787 787
788 788 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
789 789 badfn=None):
790 790 '''Return a matcher that will warn about bad matches.'''
791 791 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
792 792
793 793 def matchall(repo):
794 794 '''Return a matcher that will efficiently match everything.'''
795 795 return matchmod.always(repo.root, repo.getcwd())
796 796
797 797 def matchfiles(repo, files, badfn=None):
798 798 '''Return a matcher that will efficiently match exactly these files.'''
799 799 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
800 800
801 801 def parsefollowlinespattern(repo, rev, pat, msg):
802 802 """Return a file name from `pat` pattern suitable for usage in followlines
803 803 logic.
804 804 """
805 805 if not matchmod.patkind(pat):
806 806 return pathutil.canonpath(repo.root, repo.getcwd(), pat)
807 807 else:
808 808 ctx = repo[rev]
809 809 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
810 810 files = [f for f in ctx if m(f)]
811 811 if len(files) != 1:
812 812 raise error.ParseError(msg)
813 813 return files[0]
814 814
815 815 def origpath(ui, repo, filepath):
816 816 '''customize where .orig files are created
817 817
818 818 Fetch user defined path from config file: [ui] origbackuppath = <path>
819 819 Fall back to default (filepath with .orig suffix) if not specified
820 820 '''
821 821 origbackuppath = ui.config('ui', 'origbackuppath')
822 822 if not origbackuppath:
823 823 return filepath + ".orig"
824 824
825 825 # Convert filepath from an absolute path into a path inside the repo.
826 826 filepathfromroot = util.normpath(os.path.relpath(filepath,
827 827 start=repo.root))
828 828
829 829 origvfs = vfs.vfs(repo.wjoin(origbackuppath))
830 830 origbackupdir = origvfs.dirname(filepathfromroot)
831 831 if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
832 832 ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))
833 833
834 834 # Remove any files that conflict with the backup file's path
835 835 for f in reversed(list(util.finddirs(filepathfromroot))):
836 836 if origvfs.isfileorlink(f):
837 837 ui.note(_('removing conflicting file: %s\n')
838 838 % origvfs.join(f))
839 839 origvfs.unlink(f)
840 840 break
841 841
842 842 origvfs.makedirs(origbackupdir)
843 843
844 844 if origvfs.isdir(filepathfromroot) and not origvfs.islink(filepathfromroot):
845 845 ui.note(_('removing conflicting directory: %s\n')
846 846 % origvfs.join(filepathfromroot))
847 847 origvfs.rmtree(filepathfromroot, forcibly=True)
848 848
849 849 return origvfs.join(filepathfromroot)
850 850
851 851 class _containsnode(object):
852 852 """proxy __contains__(node) to container.__contains__ which accepts revs"""
853 853
854 854 def __init__(self, repo, revcontainer):
855 855 self._torev = repo.changelog.rev
856 856 self._revcontains = revcontainer.__contains__
857 857
858 858 def __contains__(self, node):
859 859 return self._revcontains(self._torev(node))
860 860
861 861 def cleanupnodes(repo, replacements, operation, moves=None, metadata=None,
862 862 fixphase=False, targetphase=None, backup=True):
863 863 """do common cleanups when old nodes are replaced by new nodes
864 864
865 865 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
866 866 (we might also want to move working directory parent in the future)
867 867
868 868 By default, bookmark moves are calculated automatically from 'replacements',
869 869 but 'moves' can be used to override that. Also, 'moves' may include
870 870 additional bookmark moves that should not have associated obsmarkers.
871 871
872 872 replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
873 873 have replacements. operation is a string, like "rebase".
874 874
875 875 metadata is a dictionary containing metadata to be stored in obsmarker if
876 876 obsolescence is enabled.
877 877 """
878 878 assert fixphase or targetphase is None
879 879 if not replacements and not moves:
880 880 return
881 881
882 882 # translate mapping's other forms
883 883 if not util.safehasattr(replacements, 'items'):
884 884 replacements = {(n,): () for n in replacements}
885 885 else:
886 886 # upgrading non-tuple "source" keys to tuples for BC
887 887 repls = {}
888 888 for key, value in replacements.items():
889 889 if not isinstance(key, tuple):
890 890 key = (key,)
891 891 repls[key] = value
892 892 replacements = repls
893 893
894 894 # Calculate bookmark movements
895 895 if moves is None:
896 896 moves = {}
897 897 # Unfiltered repo is needed since nodes in replacements might be hidden.
898 898 unfi = repo.unfiltered()
899 899 for oldnodes, newnodes in replacements.items():
900 900 for oldnode in oldnodes:
901 901 if oldnode in moves:
902 902 continue
903 903 if len(newnodes) > 1:
904 904 # usually a split, take the one with the biggest rev number
905 905 newnode = next(unfi.set('max(%ln)', newnodes)).node()
906 906 elif len(newnodes) == 0:
907 907 # move bookmark backwards
908 908 allreplaced = []
909 909 for rep in replacements:
910 910 allreplaced.extend(rep)
911 911 roots = list(unfi.set('max((::%n) - %ln)', oldnode,
912 912 allreplaced))
913 913 if roots:
914 914 newnode = roots[0].node()
915 915 else:
916 916 newnode = nullid
917 917 else:
918 918 newnode = newnodes[0]
919 919 moves[oldnode] = newnode
920 920
921 921 allnewnodes = [n for ns in replacements.values() for n in ns]
922 922 toretract = {}
923 923 toadvance = {}
924 924 if fixphase:
925 925 precursors = {}
926 926 for oldnodes, newnodes in replacements.items():
927 927 for oldnode in oldnodes:
928 928 for newnode in newnodes:
929 929 precursors.setdefault(newnode, []).append(oldnode)
930 930
931 931 allnewnodes.sort(key=lambda n: unfi[n].rev())
932 932 newphases = {}
933 933 def phase(ctx):
934 934 return newphases.get(ctx.node(), ctx.phase())
935 935 for newnode in allnewnodes:
936 936 ctx = unfi[newnode]
937 937 parentphase = max(phase(p) for p in ctx.parents())
938 938 if targetphase is None:
939 939 oldphase = max(unfi[oldnode].phase()
940 940 for oldnode in precursors[newnode])
941 941 newphase = max(oldphase, parentphase)
942 942 else:
943 943 newphase = max(targetphase, parentphase)
944 944 newphases[newnode] = newphase
945 945 if newphase > ctx.phase():
946 946 toretract.setdefault(newphase, []).append(newnode)
947 947 elif newphase < ctx.phase():
948 948 toadvance.setdefault(newphase, []).append(newnode)
949 949
950 950 with repo.transaction('cleanup') as tr:
951 951 # Move bookmarks
952 952 bmarks = repo._bookmarks
953 953 bmarkchanges = []
954 954 for oldnode, newnode in moves.items():
955 955 oldbmarks = repo.nodebookmarks(oldnode)
956 956 if not oldbmarks:
957 957 continue
958 958 from . import bookmarks # avoid import cycle
959 959 repo.ui.debug('moving bookmarks %r from %s to %s\n' %
960 960 (pycompat.rapply(pycompat.maybebytestr, oldbmarks),
961 961 hex(oldnode), hex(newnode)))
962 962 # Delete divergent bookmarks being parents of related newnodes
963 963 deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
964 964 allnewnodes, newnode, oldnode)
965 965 deletenodes = _containsnode(repo, deleterevs)
966 966 for name in oldbmarks:
967 967 bmarkchanges.append((name, newnode))
968 968 for b in bookmarks.divergent2delete(repo, deletenodes, name):
969 969 bmarkchanges.append((b, None))
970 970
971 971 if bmarkchanges:
972 972 bmarks.applychanges(repo, tr, bmarkchanges)
973 973
974 974 for phase, nodes in toretract.items():
975 975 phases.retractboundary(repo, tr, phase, nodes)
976 976 for phase, nodes in toadvance.items():
977 977 phases.advanceboundary(repo, tr, phase, nodes)
978 978
979 979 # Obsolete or strip nodes
980 980 if obsolete.isenabled(repo, obsolete.createmarkersopt):
981 981 # If a node is already obsoleted, and we want to obsolete it
982 982 # without a successor, skip that obsolete request since it's
983 983 # unnecessary. That's the "if s or not isobs(n)" check below.
984 984 # Also sort the nodes in topological order; that might be useful for
985 985 # some obsstore logic.
986 986 # NOTE: the sorting might belong to createmarkers.
987 987 torev = unfi.changelog.rev
988 988 sortfunc = lambda ns: torev(ns[0][0])
989 989 rels = []
990 990 for ns, s in sorted(replacements.items(), key=sortfunc):
991 991 rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
992 992 rels.append(rel)
993 993 if rels:
994 994 obsolete.createmarkers(repo, rels, operation=operation,
995 995 metadata=metadata)
996 996 else:
997 997 from . import repair # avoid import cycle
998 998 tostrip = list(n for ns in replacements for n in ns)
999 999 if tostrip:
1000 1000 repair.delayedstrip(repo.ui, repo, tostrip, operation,
1001 1001 backup=backup)
1002 1002
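
Usage sketch (repo, oldnode, newnode assumed): an extension that rewrote oldnode into newnode records the replacement in one call; obsmarkers or a strip, plus bookmark moves, follow from the repo's configuration.

    from mercurial import scmutil

    # one old node rewritten into one new node
    scmutil.cleanupnodes(repo, {oldnode: [newnode]}, b'amend')

    # nodes that were dropped outright may be passed as a bare iterable
    scmutil.cleanupnodes(repo, [oldnode], b'prune')
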
1003 1003 def addremove(repo, matcher, prefix, opts=None):
1004 1004 if opts is None:
1005 1005 opts = {}
1006 1006 m = matcher
1007 1007 dry_run = opts.get('dry_run')
1008 1008 try:
1009 1009 similarity = float(opts.get('similarity') or 0)
1010 1010 except ValueError:
1011 1011 raise error.Abort(_('similarity must be a number'))
1012 1012 if similarity < 0 or similarity > 100:
1013 1013 raise error.Abort(_('similarity must be between 0 and 100'))
1014 1014 similarity /= 100.0
1015 1015
1016 1016 ret = 0
1017 1017 join = lambda f: os.path.join(prefix, f)
1018 1018
1019 1019 wctx = repo[None]
1020 1020 for subpath in sorted(wctx.substate):
1021 1021 submatch = matchmod.subdirmatcher(subpath, m)
1022 1022 if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
1023 1023 sub = wctx.sub(subpath)
1024 1024 try:
1025 1025 if sub.addremove(submatch, prefix, opts):
1026 1026 ret = 1
1027 1027 except error.LookupError:
1028 1028 repo.ui.status(_("skipping missing subrepository: %s\n")
1029 1029 % join(subpath))
1030 1030
1031 1031 rejected = []
1032 1032 def badfn(f, msg):
1033 1033 if f in m.files():
1034 1034 m.bad(f, msg)
1035 1035 rejected.append(f)
1036 1036
1037 1037 badmatch = matchmod.badmatch(m, badfn)
1038 1038 added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
1039 1039 badmatch)
1040 1040
1041 1041 unknownset = set(unknown + forgotten)
1042 1042 toprint = unknownset.copy()
1043 1043 toprint.update(deleted)
1044 1044 for abs in sorted(toprint):
1045 1045 if repo.ui.verbose or not m.exact(abs):
1046 1046 if abs in unknownset:
1047 1047 status = _('adding %s\n') % m.uipath(abs)
1048 1048 label = 'ui.addremove.added'
1049 1049 else:
1050 1050 status = _('removing %s\n') % m.uipath(abs)
1051 1051 label = 'ui.addremove.removed'
1052 1052 repo.ui.status(status, label=label)
1053 1053
1054 1054 renames = _findrenames(repo, m, added + unknown, removed + deleted,
1055 1055 similarity)
1056 1056
1057 1057 if not dry_run:
1058 1058 _markchanges(repo, unknown + forgotten, deleted, renames)
1059 1059
1060 1060 for f in rejected:
1061 1061 if f in m.files():
1062 1062 return 1
1063 1063 return ret
1064 1064
1065 1065 def marktouched(repo, files, similarity=0.0):
1066 1066 '''Assert that files have somehow been operated upon. files are relative to
1067 1067 the repo root.'''
1068 1068 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
1069 1069 rejected = []
1070 1070
1071 1071 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
1072 1072
1073 1073 if repo.ui.verbose:
1074 1074 unknownset = set(unknown + forgotten)
1075 1075 toprint = unknownset.copy()
1076 1076 toprint.update(deleted)
1077 1077 for abs in sorted(toprint):
1078 1078 if abs in unknownset:
1079 1079 status = _('adding %s\n') % abs
1080 1080 else:
1081 1081 status = _('removing %s\n') % abs
1082 1082 repo.ui.status(status)
1083 1083
1084 1084 renames = _findrenames(repo, m, added + unknown, removed + deleted,
1085 1085 similarity)
1086 1086
1087 1087 _markchanges(repo, unknown + forgotten, deleted, renames)
1088 1088
1089 1089 for f in rejected:
1090 1090 if f in m.files():
1091 1091 return 1
1092 1092 return 0
1093 1093
1094 1094 def _interestingfiles(repo, matcher):
1095 1095 '''Walk dirstate with matcher, looking for files that addremove would care
1096 1096 about.
1097 1097
1098 1098 This is different from dirstate.status because it doesn't care about
1099 1099 whether files are modified or clean.'''
1100 1100 added, unknown, deleted, removed, forgotten = [], [], [], [], []
1101 1101 audit_path = pathutil.pathauditor(repo.root, cached=True)
1102 1102
1103 1103 ctx = repo[None]
1104 1104 dirstate = repo.dirstate
1105 1105 matcher = repo.narrowmatch(matcher, includeexact=True)
1106 1106 walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
1107 1107 unknown=True, ignored=False, full=False)
1108 1108 for abs, st in walkresults.iteritems():
1109 1109 dstate = dirstate[abs]
1110 1110 if dstate == '?' and audit_path.check(abs):
1111 1111 unknown.append(abs)
1112 1112 elif dstate != 'r' and not st:
1113 1113 deleted.append(abs)
1114 1114 elif dstate == 'r' and st:
1115 1115 forgotten.append(abs)
1116 1116 # for finding renames
1117 1117 elif dstate == 'r' and not st:
1118 1118 removed.append(abs)
1119 1119 elif dstate == 'a':
1120 1120 added.append(abs)
1121 1121
1122 1122 return added, unknown, deleted, removed, forgotten
1123 1123
1124 1124 def _findrenames(repo, matcher, added, removed, similarity):
1125 1125 '''Find renames from removed files to added ones.'''
1126 1126 renames = {}
1127 1127 if similarity > 0:
1128 1128 for old, new, score in similar.findrenames(repo, added, removed,
1129 1129 similarity):
1130 1130 if (repo.ui.verbose or not matcher.exact(old)
1131 1131 or not matcher.exact(new)):
1132 1132 repo.ui.status(_('recording removal of %s as rename to %s '
1133 1133 '(%d%% similar)\n') %
1134 1134 (matcher.rel(old), matcher.rel(new),
1135 1135 score * 100))
1136 1136 renames[new] = old
1137 1137 return renames
1138 1138
1139 1139 def _markchanges(repo, unknown, deleted, renames):
1140 1140 '''Marks the files in unknown as added, the files in deleted as removed,
1141 1141 and the files in renames as copied.'''
1142 1142 wctx = repo[None]
1143 1143 with repo.wlock():
1144 1144 wctx.forget(deleted)
1145 1145 wctx.add(unknown)
1146 1146 for new, old in renames.iteritems():
1147 1147 wctx.copy(old, new)
1148 1148
1149 1149 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1150 1150 """Update the dirstate to reflect the intent of copying src to dst. For
1151 1151 different reasons it might not end with dst being marked as copied from src.
1152 1152 """
1153 1153 origsrc = repo.dirstate.copied(src) or src
1154 1154 if dst == origsrc: # copying back a copy?
1155 1155 if repo.dirstate[dst] not in 'mn' and not dryrun:
1156 1156 repo.dirstate.normallookup(dst)
1157 1157 else:
1158 1158 if repo.dirstate[origsrc] == 'a' and origsrc == src:
1159 1159 if not ui.quiet:
1160 1160 ui.warn(_("%s has not been committed yet, so no copy "
1161 1161 "data will be stored for %s.\n")
1162 1162 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
1163 1163 if repo.dirstate[dst] in '?r' and not dryrun:
1164 1164 wctx.add([dst])
1165 1165 elif not dryrun:
1166 1166 wctx.copy(origsrc, dst)
1167 1167
1168 1168 def writerequires(opener, requirements):
1169      - with opener('requires', 'w') as fp:
     1169 + with opener('requires', 'w', atomictemp=True) as fp:
1170 1170 for r in sorted(requirements):
1171 1171 fp.write("%s\n" % r)
1172 1172
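
This hunk is the whole point of the commit: with atomictemp=True the vfs writes to a temporary sibling file and renames it into place, so a reader can never observe a truncated .hg/requires. A simplified standalone sketch of the pattern (Mercurial's real atomictempfile additionally preserves the file mode, among other details):

    import os
    import tempfile

    def writeatomic_sketch(path, data):
        # write a sibling temp file, then rename() it over the target;
        # rename is atomic on POSIX, so readers see old or new content,
        # never a partially written file
        fd, tmppath = tempfile.mkstemp(dir=os.path.dirname(path) or '.')
        try:
            with os.fdopen(fd, 'wb') as fp:
                fp.write(data)
            os.rename(tmppath, path)
        except Exception:
            os.unlink(tmppath)
            raise
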
1173 1173 class filecachesubentry(object):
1174 1174 def __init__(self, path, stat):
1175 1175 self.path = path
1176 1176 self.cachestat = None
1177 1177 self._cacheable = None
1178 1178
1179 1179 if stat:
1180 1180 self.cachestat = filecachesubentry.stat(self.path)
1181 1181
1182 1182 if self.cachestat:
1183 1183 self._cacheable = self.cachestat.cacheable()
1184 1184 else:
1185 1185 # None means we don't know yet
1186 1186 self._cacheable = None
1187 1187
1188 1188 def refresh(self):
1189 1189 if self.cacheable():
1190 1190 self.cachestat = filecachesubentry.stat(self.path)
1191 1191
1192 1192 def cacheable(self):
1193 1193 if self._cacheable is not None:
1194 1194 return self._cacheable
1195 1195
1196 1196 # we don't know yet, assume it is for now
1197 1197 return True
1198 1198
1199 1199 def changed(self):
1200 1200 # no point in going further if we can't cache it
1201 1201 if not self.cacheable():
1202 1202 return True
1203 1203
1204 1204 newstat = filecachesubentry.stat(self.path)
1205 1205
1206 1206 # we may not know if it's cacheable yet, check again now
1207 1207 if newstat and self._cacheable is None:
1208 1208 self._cacheable = newstat.cacheable()
1209 1209
1210 1210 # check again
1211 1211 if not self._cacheable:
1212 1212 return True
1213 1213
1214 1214 if self.cachestat != newstat:
1215 1215 self.cachestat = newstat
1216 1216 return True
1217 1217 else:
1218 1218 return False
1219 1219
1220 1220 @staticmethod
1221 1221 def stat(path):
1222 1222 try:
1223 1223 return util.cachestat(path)
1224 1224 except OSError as e:
1225 1225 if e.errno != errno.ENOENT:
1226 1226 raise
1227 1227
1228 1228 class filecacheentry(object):
1229 1229 def __init__(self, paths, stat=True):
1230 1230 self._entries = []
1231 1231 for path in paths:
1232 1232 self._entries.append(filecachesubentry(path, stat))
1233 1233
1234 1234 def changed(self):
1235 1235 '''true if any entry has changed'''
1236 1236 for entry in self._entries:
1237 1237 if entry.changed():
1238 1238 return True
1239 1239 return False
1240 1240
1241 1241 def refresh(self):
1242 1242 for entry in self._entries:
1243 1243 entry.refresh()
1244 1244
1245 1245 class filecache(object):
1246 1246 """A property like decorator that tracks files under .hg/ for updates.
1247 1247
1248 1248 On first access, the files defined as arguments are stat()ed and the
1249 1249 results cached. The decorated function is called. The results are stashed
1250 1250 away in a ``_filecache`` dict on the object whose method is decorated.
1251 1251
1252 1252 On subsequent access, the cached result is used as it is set to the
1253 1253 instance dictionary.
1254 1254
1255 1255 On external property set/delete operations, the caller must update the
1256 1256 corresponding _filecache entry appropriately. Use __class__.<attr>.set()
1257 1257 instead of directly setting <attr>.
1258 1258
1259 1259 When using the property API, the cached data is always used if available.
1260 1260 No stat() is performed to check if the file has changed.
1261 1261
1262 1262 Others can muck about with the state of the ``_filecache`` dict. e.g. they
1263 1263 can populate an entry before the property's getter is called. In this case,
1264 1264 entries in ``_filecache`` will be used during property operations,
1265 1265 if available. If the underlying file changes, it is up to external callers
1266 1266 to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
1267 1267 method result as well as possibly calling ``del obj._filecache[attr]`` to
1268 1268 remove the ``filecacheentry``.
1269 1269 """
1270 1270
1271 1271 def __init__(self, *paths):
1272 1272 self.paths = paths
1273 1273
1274 1274 def join(self, obj, fname):
1275 1275 """Used to compute the runtime path of a cached file.
1276 1276
1277 1277 Users should subclass filecache and provide their own version of this
1278 1278 function to call the appropriate join function on 'obj' (an instance
1279 1279 of the class whose member function was decorated).
1280 1280 """
1281 1281 raise NotImplementedError
1282 1282
1283 1283 def __call__(self, func):
1284 1284 self.func = func
1285 1285 self.sname = func.__name__
1286 1286 self.name = pycompat.sysbytes(self.sname)
1287 1287 return self
1288 1288
1289 1289 def __get__(self, obj, type=None):
1290 1290 # if accessed on the class, return the descriptor itself.
1291 1291 if obj is None:
1292 1292 return self
1293 1293
1294 1294 assert self.sname not in obj.__dict__
1295 1295
1296 1296 entry = obj._filecache.get(self.name)
1297 1297
1298 1298 if entry:
1299 1299 if entry.changed():
1300 1300 entry.obj = self.func(obj)
1301 1301 else:
1302 1302 paths = [self.join(obj, path) for path in self.paths]
1303 1303
1304 1304 # We stat -before- creating the object so our cache doesn't lie if
1305 1305 # a writer modifies the file between the time we read and stat it
1306 1306 entry = filecacheentry(paths, True)
1307 1307 entry.obj = self.func(obj)
1308 1308
1309 1309 obj._filecache[self.name] = entry
1310 1310
1311 1311 obj.__dict__[self.sname] = entry.obj
1312 1312 return entry.obj
1313 1313
1314 1314 # don't implement __set__(), which would make __dict__ lookup as slow as
1315 1315 # a function call.
1316 1316
1317 1317 def set(self, obj, value):
1318 1318 if self.name not in obj._filecache:
1319 1319 # we add an entry for the missing value because X in __dict__
1320 1320 # implies X in _filecache
1321 1321 paths = [self.join(obj, path) for path in self.paths]
1322 1322 ce = filecacheentry(paths, False)
1323 1323 obj._filecache[self.name] = ce
1324 1324 else:
1325 1325 ce = obj._filecache[self.name]
1326 1326
1327 1327 ce.obj = value # update cached copy
1328 1328 obj.__dict__[self.sname] = value # update copy returned by obj.x
1329 1329
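
A sketch of how repository code typically wires this up (compare localrepo's store caches): the subclass supplies join(), and the decorated method is recomputed only when the backing file's stat changes. parsebookmarks is hypothetical.

    from mercurial import scmutil

    class repofilecache(scmutil.filecache):
        def join(self, obj, fname):
            return obj.vfs.join(fname)      # obj is the repository

    class somerepo(object):
        def __init__(self, vfs):
            self.vfs = vfs
            self._filecache = {}

        @repofilecache(b'bookmarks')
        def bookmarks(self):
            return parsebookmarks(self.vfs)  # hypothetical parser
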
1330 1330 def extdatasource(repo, source):
1331 1331 """Gather a map of rev -> value dict from the specified source
1332 1332
1333 1333 A source spec is treated as a URL, with a special case shell: type
1334 1334 for parsing the output from a shell command.
1335 1335
1336 1336 The data is parsed as a series of newline-separated records where
1337 1337 each record is a revision specifier optionally followed by a space
1338 1338 and a freeform string value. If the revision is known locally, it
1339 1339 is converted to a rev, otherwise the record is skipped.
1340 1340
1341 1341 Note that both key and value are treated as UTF-8 and converted to
1342 1342 the local encoding. This allows uniformity between local and
1343 1343 remote data sources.
1344 1344 """
1345 1345
1346 1346 spec = repo.ui.config("extdata", source)
1347 1347 if not spec:
1348 1348 raise error.Abort(_("unknown extdata source '%s'") % source)
1349 1349
1350 1350 data = {}
1351 1351 src = proc = None
1352 1352 try:
1353 1353 if spec.startswith("shell:"):
1354 1354 # external commands should be run relative to the repo root
1355 1355 cmd = spec[6:]
1356 1356 proc = subprocess.Popen(procutil.tonativestr(cmd),
1357 1357 shell=True, bufsize=-1,
1358 1358 close_fds=procutil.closefds,
1359 1359 stdout=subprocess.PIPE,
1360 1360 cwd=procutil.tonativestr(repo.root))
1361 1361 src = proc.stdout
1362 1362 else:
1363 1363 # treat as a URL or file
1364 1364 src = url.open(repo.ui, spec)
1365 1365 for l in src:
1366 1366 if " " in l:
1367 1367 k, v = l.strip().split(" ", 1)
1368 1368 else:
1369 1369 k, v = l.strip(), ""
1370 1370
1371 1371 k = encoding.tolocal(k)
1372 1372 try:
1373 1373 data[revsingle(repo, k).rev()] = encoding.tolocal(v)
1374 1374 except (error.LookupError, error.RepoLookupError):
1375 1375 pass # we ignore data for nodes that don't exist locally
1376 1376 finally:
1377 1377 if proc:
1378 1378 proc.communicate()
1379 1379 if src:
1380 1380 src.close()
1381 1381 if proc and proc.returncode != 0:
1382 1382 raise error.Abort(_("extdata command '%s' failed: %s")
1383 1383 % (cmd, procutil.explainexit(proc.returncode)))
1384 1384
1385 1385 return data
1386 1386
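
Usage sketch (repo assumed), with a hypothetical [extdata] entry such as "bugrefs = shell:cat .hg/bugmap", where .hg/bugmap holds lines like "deadbeef BUG-123"; records for nodes unknown locally are silently dropped:

    from mercurial import scmutil

    data = scmutil.extdatasource(repo, b'bugrefs')
    for rev, value in data.items():
        print(rev, value)
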
1387 1387 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1388 1388 if lock is None:
1389 1389 raise error.LockInheritanceContractViolation(
1390 1390 'lock can only be inherited while held')
1391 1391 if environ is None:
1392 1392 environ = {}
1393 1393 with lock.inherit() as locker:
1394 1394 environ[envvar] = locker
1395 1395 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1396 1396
1397 1397 def wlocksub(repo, cmd, *args, **kwargs):
1398 1398 """run cmd as a subprocess that allows inheriting repo's wlock
1399 1399
1400 1400 This can only be called while the wlock is held. This takes all the
1401 1401 arguments that ui.system does, and returns the exit code of the
1402 1402 subprocess."""
1403 1403 return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
1404 1404 **kwargs)
1405 1405
1406 1406 class progress(object):
1407 1407 def __init__(self, ui, topic, unit="", total=None):
1408 1408 self.ui = ui
1409 1409 self.pos = 0
1410 1410 self.topic = topic
1411 1411 self.unit = unit
1412 1412 self.total = total
1413 1413
1414 1414 def __enter__(self):
1415 1415 return self
1416 1416
1417 1417 def __exit__(self, exc_type, exc_value, exc_tb):
1418 1418 self.complete()
1419 1419
1420 1420 def update(self, pos, item="", total=None):
1421 1421 assert pos is not None
1422 1422 if total:
1423 1423 self.total = total
1424 1424 self.pos = pos
1425 1425 self._print(item)
1426 1426
1427 1427 def increment(self, step=1, item="", total=None):
1428 1428 self.update(self.pos + step, item, total)
1429 1429
1430 1430 def complete(self):
1431 1431 self.ui.progress(self.topic, None)
1432 1432
1433 1433 def _print(self, item):
1434 1434 self.ui.progress(self.topic, self.pos, item, self.unit,
1435 1435 self.total)
1436 1436
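
Usage sketch (ui assumed): the context manager guarantees complete() runs and the progress topic is cleared even if the loop raises.

    from mercurial import scmutil

    files = [b'a.txt', b'b.txt', b'c.txt']
    with scmutil.progress(ui, b'checking', unit=b'files',
                          total=len(files)) as prog:
        for f in files:
            prog.increment(item=f)
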
1437 1437 def gdinitconfig(ui):
1438 1438 """helper function to know if a repo should be created as general delta
1439 1439 """
1440 1440 # experimental config: format.generaldelta
1441 1441 return (ui.configbool('format', 'generaldelta')
1442 1442 or ui.configbool('format', 'usegeneraldelta')
1443 1443 or ui.configbool('format', 'sparse-revlog'))
1444 1444
1445 1445 def gddeltaconfig(ui):
1446 1446 """helper function to know if incoming delta should be optimised
1447 1447 """
1448 1448 # experimental config: format.generaldelta
1449 1449 return ui.configbool('format', 'generaldelta')
1450 1450
1451 1451 class simplekeyvaluefile(object):
1452 1452 """A simple file with key=value lines
1453 1453
1454 1454 Keys must be alphanumerics and start with a letter, values must not
1455 1455 contain '\n' characters"""
1456 1456 firstlinekey = '__firstline'
1457 1457
1458 1458 def __init__(self, vfs, path, keys=None):
1459 1459 self.vfs = vfs
1460 1460 self.path = path
1461 1461
1462 1462 def read(self, firstlinenonkeyval=False):
1463 1463 """Read the contents of a simple key-value file
1464 1464
1465 1465 'firstlinenonkeyval' indicates whether the first line of file should
1466 1466 be treated as a key-value pair or returned fully under the
1467 1467 __firstline key."""
1468 1468 lines = self.vfs.readlines(self.path)
1469 1469 d = {}
1470 1470 if firstlinenonkeyval:
1471 1471 if not lines:
1472 1472 e = _("empty simplekeyvalue file")
1473 1473 raise error.CorruptedState(e)
1474 1474 # we don't want to include '\n' in the __firstline
1475 1475 d[self.firstlinekey] = lines[0][:-1]
1476 1476 del lines[0]
1477 1477
1478 1478 try:
1479 1479 # the 'if line.strip()' part prevents us from failing on empty
1480 1480 # lines which only contain '\n' and therefore are not skipped
1481 1481 # by 'if line'
1482 1482 updatedict = dict(line[:-1].split('=', 1) for line in lines
1483 1483 if line.strip())
1484 1484 if self.firstlinekey in updatedict:
1485 1485 e = _("%r can't be used as a key")
1486 1486 raise error.CorruptedState(e % self.firstlinekey)
1487 1487 d.update(updatedict)
1488 1488 except ValueError as e:
1489 1489 raise error.CorruptedState(str(e))
1490 1490 return d
1491 1491
1492 1492 def write(self, data, firstline=None):
1493 1493 """Write key=>value mapping to a file
1494 1494 data is a dict. Keys must be alphanumerical and start with a letter.
1495 1495 Values must not contain newline characters.
1496 1496
1497 1497 If 'firstline' is not None, it is written to file before
1498 1498 everything else, as it is, not in a key=value form"""
1499 1499 lines = []
1500 1500 if firstline is not None:
1501 1501 lines.append('%s\n' % firstline)
1502 1502
1503 1503 for k, v in data.items():
1504 1504 if k == self.firstlinekey:
1505 1505 e = "key name '%s' is reserved" % self.firstlinekey
1506 1506 raise error.ProgrammingError(e)
1507 1507 if not k[0:1].isalpha():
1508 1508 e = "keys must start with a letter in a key-value file"
1509 1509 raise error.ProgrammingError(e)
1510 1510 if not k.isalnum():
1511 1511 e = "invalid key name in a simple key-value file"
1512 1512 raise error.ProgrammingError(e)
1513 1513 if '\n' in v:
1514 1514 e = "invalid value in a simple key-value file"
1515 1515 raise error.ProgrammingError(e)
1516 1516 lines.append("%s=%s\n" % (k, v))
1517 1517 with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
1518 1518 fp.write(''.join(lines))
1519 1519
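
Round-trip sketch; the directory is hypothetical, and note that write() above already uses atomictemp=True, the same pattern this commit applies to .hg/requires:

    from mercurial import scmutil, vfs as vfsmod

    kvfs = vfsmod.vfs(b'/tmp/kvdemo')       # any writable directory
    f = scmutil.simplekeyvaluefile(kvfs, b'state')
    f.write({b'version': b'1', b'command': b'rebase'}, firstline=b'v1')
    data = f.read(firstlinenonkeyval=True)
    assert data[b'__firstline'] == b'v1'
    assert data[b'command'] == b'rebase'
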
1520 1520 _reportobsoletedsource = [
1521 1521 'debugobsolete',
1522 1522 'pull',
1523 1523 'push',
1524 1524 'serve',
1525 1525 'unbundle',
1526 1526 ]
1527 1527
1528 1528 _reportnewcssource = [
1529 1529 'pull',
1530 1530 'unbundle',
1531 1531 ]
1532 1532
def prefetchfiles(repo, revs, match):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally before the command
    uses them."""
    if match:
        # The command itself will complain about files that don't exist, so
        # don't duplicate the message.
        match = matchmod.badmatch(match, lambda fn, msg: None)
    else:
        match = matchall(repo)

    fileprefetchhooks(repo, revs, match)

# a list of (repo, revs, match) prefetch functions
fileprefetchhooks = util.hooks()

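# A sketch of how an extension might register a prefetch function (the
# function name and body are hypothetical). Each registered function is
# called with (repo, revs, match) and should make the matched files
# available locally:
#
#   def _myprefetch(repo, revs, match):
#       for rev in revs:
#           for f in repo[rev].walk(match):
#               pass  # e.g. warm a remote store cache for 'f' here
#
#   fileprefetchhooks.add('myextension', _myprefetch)
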
# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True

def registersummarycallback(repo, otr, txnname=''):
    """register a callback to issue a summary after the transaction is closed
    """
    def txmatch(sources):
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # A repoview's life cycle is shorter than that of the actual
        # underlying repository, so the filtered object can die before the
        # weakref is used, leading to trouble. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())
        def wrapped(tr):
            repo = reporef()
            if filtername:
                repo = repo.filtered(filtername)
            func(repo, tr)
        newcat = '%02i-txnreport' % len(categories)
        otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    if txmatch(_reportobsoletedsource):
        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            if obsoleted:
                repo.ui.status(_('obsoleted %i changesets\n')
                               % len(obsoleted))

    if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
        repo.ui.configbool('experimental', 'evolution.report-instabilities')):
        instabilitytypes = [
            ('orphan', 'orphan'),
            ('phase-divergent', 'phasedivergent'),
            ('content-divergent', 'contentdivergent'),
        ]

        def getinstabilitycounts(repo):
            filtered = repo.changelog.filteredrevs
            counts = {}
            for instability, revset in instabilitytypes:
                counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
                                          filtered)
            return counts

        oldinstabilitycounts = getinstabilitycounts(repo)
        @reportsummary
        def reportnewinstabilities(repo, tr):
            newinstabilitycounts = getinstabilitycounts(repo)
            for instability, revset in instabilitytypes:
                delta = (newinstabilitycounts[instability] -
                         oldinstabilitycounts[instability])
                msg = getinstabilitymessage(delta, instability)
                if msg:
                    repo.ui.warn(msg)

    if txmatch(_reportnewcssource):
        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get('origrepolen', len(repo))
            unfi = repo.unfiltered()
            if origrepolen >= len(unfi):
                return

            # Compute the bounds of new visible revisions' range.
            revs = smartset.spanset(repo, start=origrepolen)
            if revs:
                minrev, maxrev = repo[revs.min()], repo[revs.max()]

                if minrev == maxrev:
                    revrange = minrev
                else:
                    revrange = '%s:%s' % (minrev, maxrev)
                draft = len(repo.revs('%ld and draft()', revs))
                secret = len(repo.revs('%ld and secret()', revs))
                if not (draft or secret):
                    msg = _('new changesets %s\n') % revrange
                elif draft and secret:
                    msg = _('new changesets %s (%d drafts, %d secrets)\n')
                    msg %= (revrange, draft, secret)
                elif draft:
                    msg = _('new changesets %s (%d drafts)\n')
                    msg %= (revrange, draft)
                elif secret:
                    msg = _('new changesets %s (%d secrets)\n')
                    msg %= (revrange, secret)
                else:
                    errormsg = 'entered unreachable condition'
                    raise error.ProgrammingError(errormsg)
                repo.ui.status(msg)

            # search for new changesets directly pulled as obsolete
            duplicates = tr.changes.get('revduplicates', ())
            obsadded = unfi.revs('(%d: + %ld) and obsolete()',
                                 origrepolen, duplicates)
            cl = repo.changelog
            extinctadded = [r for r in obsadded if r not in cl]
            if extinctadded:
                # They are not just obsolete, but obsolete and invisible: we
                # call them "extinct" internally, but the term has not been
                # exposed to users.
                msg = '(%d other changesets obsolete on arrival)\n'
                repo.ui.status(msg % len(extinctadded))

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            pull/unbundle.
            """
            origrepolen = tr.changes.get('origrepolen', len(repo))
            phasetracking = tr.changes.get('phases', {})
            if not phasetracking:
                return
            published = [
                rev for rev, (old, new) in phasetracking.iteritems()
                if new == phases.public and rev < origrepolen
            ]
            if not published:
                return
            repo.ui.status(_('%d local changesets published\n')
                           % len(published))

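# A hedged sketch of the expected call pattern (the caller and transaction
# name here are assumptions; this function is normally invoked by the
# repository's transaction machinery):
#
#   with repo.transaction('pull') as tr:
#       registersummarycallback(repo, tr, txnname='pull')
#       ...  # apply incoming changes; summaries print when tr closes
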
def getinstabilitymessage(delta, instability):
    """return the warning message to show about new instabilities

    exists as a separate function so that extensions can wrap it to show more
    information, like how to fix instabilities"""
    if delta > 0:
        return _('%i new %s changesets\n') % (delta, instability)

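# For example (illustrative): getinstabilitymessage(2, 'orphan') returns
# '2 new orphan changesets\n', while a zero or negative delta falls through
# and returns None, so callers can simply test the result for truthiness.
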
def nodesummaries(repo, nodes, maxnumnodes=4):
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return ' '.join(short(h) for h in nodes)
    first = ' '.join(short(h) for h in nodes[:maxnumnodes])
    return _("%s and %d others") % (first, len(nodes) - maxnumnodes)

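# For example (illustrative, with made-up hashes): six nodes and the default
# maxnumnodes=4 yield something like 'a1b2c3d4e5f6 ... 4d5e6f7a8b9c and 2
# others'; with four or fewer nodes, or in verbose mode, every short hash is
# listed.
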
def enforcesinglehead(repo, tr, desc):
    """check that no named branch has multiple heads"""
    if desc in ('strip', 'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered('visible')
    # possible improvement: we could restrict the check to the affected
    # branches
    for name, heads in visible.branchmap().iteritems():
        if len(heads) > 1:
            msg = _('rejecting multiple heads on branch "%s"')
            msg %= name
            hint = _('%d heads: %s')
            hint %= (len(heads), nodesummaries(repo, heads))
            raise error.Abort(msg, hint=hint)

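# Illustrative call pattern (an assumption; the wiring is not shown in this
# file): run the check against an open transaction, passing the transaction
# description so that strip/repair operations can be skipped:
#
#   enforcesinglehead(repo, tr, desc='push')
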
def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.
    """
    return sink

def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    if not repo.filtername or not repo.ui.configbool('experimental',
                                                     'directaccess'):
        return repo

    if repo.filtername not in ('visible', 'visible-hidden'):
        return repo

    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError:  # will be reported by scmutil.revrange()
            continue

        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)

    if not revs:
        return repo

    if hiddentype == 'warn':
        unfi = repo.unfiltered()
        revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
        repo.ui.warn(_("warning: accessing hidden changesets for write "
                       "operation: %s\n") % revstr)

    # we have to use a new filtername to separate branch/tags caches until we
    # can disable these caches when revisions are dynamically pinned.
    return repo.filtered('visible-hidden', revs)

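# A hedged usage sketch (the spec value is made up): a command about to write
# to user-named hidden changesets could pin them first, then resolve the
# specs as usual:
#
#   repo = unhidehashlikerevs(repo, ['1f0dee641bb7'], 'warn')
#   # revrange()/revsingle() on the returned repo can now see the pinned
#   # hidden changesets
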
def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and return a set of revision numbers of hidden
    changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
    for s in symbols:
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    if n not in cl:
                        revs.add(n)
                    continue
        except ValueError:
            pass

        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs

def bookmarkrevs(repo, mark):
    """
    Select revisions reachable by a given bookmark
    """
    return repo.revs("ancestors(bookmark(%s)) - "
                     "ancestors(head() and not bookmark(%s)) - "
                     "ancestors(bookmark() and not bookmark(%s))",
                     mark, mark, mark)
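# For example (illustrative): bookmarkrevs(repo, 'feature') selects the
# ancestors of the 'feature' bookmark that are not also reachable from other
# heads or other bookmarks, i.e. roughly "the changesets that belong to the
# 'feature' line of work".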