pullreport: skip or rework some early return...
Boris Feld
r39934:b5e12039 default
@@ -1,1790 +1,1791 @@
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import hashlib
13 13 import os
14 14 import re
15 15 import socket
16 16 import subprocess
17 17 import weakref
18 18
19 19 from .i18n import _
20 20 from .node import (
21 21 bin,
22 22 hex,
23 23 nullid,
24 24 nullrev,
25 25 short,
26 26 wdirid,
27 27 wdirrev,
28 28 )
29 29
30 30 from . import (
31 31 encoding,
32 32 error,
33 33 match as matchmod,
34 34 obsolete,
35 35 obsutil,
36 36 pathutil,
37 37 phases,
38 38 policy,
39 39 pycompat,
40 40 revsetlang,
41 41 similar,
42 42 smartset,
43 43 url,
44 44 util,
45 45 vfs,
46 46 )
47 47
48 48 from .utils import (
49 49 procutil,
50 50 stringutil,
51 51 )
52 52
53 53 if pycompat.iswindows:
54 54 from . import scmwindows as scmplatform
55 55 else:
56 56 from . import scmposix as scmplatform
57 57
58 58 parsers = policy.importmod(r'parsers')
59 59
60 60 termsize = scmplatform.termsize
61 61
62 62 class status(tuple):
63 63 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
64 64 and 'ignored' properties are only relevant to the working copy.
65 65 '''
66 66
67 67 __slots__ = ()
68 68
69 69 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
70 70 clean):
71 71 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
72 72 ignored, clean))
73 73
74 74 @property
75 75 def modified(self):
76 76 '''files that have been modified'''
77 77 return self[0]
78 78
79 79 @property
80 80 def added(self):
81 81 '''files that have been added'''
82 82 return self[1]
83 83
84 84 @property
85 85 def removed(self):
86 86 '''files that have been removed'''
87 87 return self[2]
88 88
89 89 @property
90 90 def deleted(self):
91 91 '''files that are in the dirstate, but have been deleted from the
92 92 working copy (aka "missing")
93 93 '''
94 94 return self[3]
95 95
96 96 @property
97 97 def unknown(self):
98 98 '''files not in the dirstate that are not ignored'''
99 99 return self[4]
100 100
101 101 @property
102 102 def ignored(self):
103 103 '''files not in the dirstate that are ignored (by _dirignore())'''
104 104 return self[5]
105 105
106 106 @property
107 107 def clean(self):
108 108 '''files that have not been modified'''
109 109 return self[6]
110 110
111 111 def __repr__(self, *args, **kwargs):
112 112 return ((r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
113 113 r'unknown=%s, ignored=%s, clean=%s>') %
114 114 tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self))
115 115
116 116 def itersubrepos(ctx1, ctx2):
117 117 """find subrepos in ctx1 or ctx2"""
118 118 # Create a (subpath, ctx) mapping where we prefer subpaths from
119 119 # ctx1. The subpaths from ctx2 are important when the .hgsub file
120 120 # has been modified (in ctx2) but not yet committed (in ctx1).
121 121 subpaths = dict.fromkeys(ctx2.substate, ctx2)
122 122 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
123 123
124 124 missing = set()
125 125
126 126 for subpath in ctx2.substate:
127 127 if subpath not in ctx1.substate:
128 128 del subpaths[subpath]
129 129 missing.add(subpath)
130 130
131 131 for subpath, ctx in sorted(subpaths.iteritems()):
132 132 yield subpath, ctx.sub(subpath)
133 133
134 134 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
135 135 # status and diff will have an accurate result when it does
136 136 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
137 137 # against itself.
138 138 for subpath in missing:
139 139 yield subpath, ctx2.nullsub(subpath, ctx1)
140 140
141 141 def nochangesfound(ui, repo, excluded=None):
142 142 '''Report no changes for push/pull; excluded is None or a list of
143 143 nodes excluded from the push/pull.
144 144 '''
145 145 secretlist = []
146 146 if excluded:
147 147 for n in excluded:
148 148 ctx = repo[n]
149 149 if ctx.phase() >= phases.secret and not ctx.extinct():
150 150 secretlist.append(n)
151 151
152 152 if secretlist:
153 153 ui.status(_("no changes found (ignored %d secret changesets)\n")
154 154 % len(secretlist))
155 155 else:
156 156 ui.status(_("no changes found\n"))
157 157
158 158 def callcatch(ui, func):
159 159 """call func() with global exception handling
160 160
161 161 return func() if no exception happens. Otherwise do some error handling
162 162 and return an exit code accordingly. Does not handle all exceptions.
163 163 """
164 164 try:
165 165 try:
166 166 return func()
167 167 except: # re-raises
168 168 ui.traceback()
169 169 raise
170 170 # Global exception handling, alphabetically
171 171 # Mercurial-specific first, followed by built-in and library exceptions
172 172 except error.LockHeld as inst:
173 173 if inst.errno == errno.ETIMEDOUT:
174 174 reason = _('timed out waiting for lock held by %r') % inst.locker
175 175 else:
176 176 reason = _('lock held by %r') % inst.locker
177 177 ui.error(_("abort: %s: %s\n") % (
178 178 inst.desc or stringutil.forcebytestr(inst.filename), reason))
179 179 if not inst.locker:
180 180 ui.error(_("(lock might be very busy)\n"))
181 181 except error.LockUnavailable as inst:
182 182 ui.error(_("abort: could not lock %s: %s\n") %
183 183 (inst.desc or stringutil.forcebytestr(inst.filename),
184 184 encoding.strtolocal(inst.strerror)))
185 185 except error.OutOfBandError as inst:
186 186 if inst.args:
187 187 msg = _("abort: remote error:\n")
188 188 else:
189 189 msg = _("abort: remote error\n")
190 190 ui.error(msg)
191 191 if inst.args:
192 192 ui.error(''.join(inst.args))
193 193 if inst.hint:
194 194 ui.error('(%s)\n' % inst.hint)
195 195 except error.RepoError as inst:
196 196 ui.error(_("abort: %s!\n") % inst)
197 197 if inst.hint:
198 198 ui.error(_("(%s)\n") % inst.hint)
199 199 except error.ResponseError as inst:
200 200 ui.error(_("abort: %s") % inst.args[0])
201 201 msg = inst.args[1]
202 202 if isinstance(msg, type(u'')):
203 203 msg = pycompat.sysbytes(msg)
204 204 if not isinstance(msg, bytes):
205 205 ui.error(" %r\n" % (msg,))
206 206 elif not msg:
207 207 ui.error(_(" empty string\n"))
208 208 else:
209 209 ui.error("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
210 210 except error.CensoredNodeError as inst:
211 211 ui.error(_("abort: file censored %s!\n") % inst)
212 212 except error.StorageError as inst:
213 213 ui.error(_("abort: %s!\n") % inst)
214 214 except error.InterventionRequired as inst:
215 215 ui.error("%s\n" % inst)
216 216 if inst.hint:
217 217 ui.error(_("(%s)\n") % inst.hint)
218 218 return 1
219 219 except error.WdirUnsupported:
220 220 ui.error(_("abort: working directory revision cannot be specified\n"))
221 221 except error.Abort as inst:
222 222 ui.error(_("abort: %s\n") % inst)
223 223 if inst.hint:
224 224 ui.error(_("(%s)\n") % inst.hint)
225 225 except ImportError as inst:
226 226 ui.error(_("abort: %s!\n") % stringutil.forcebytestr(inst))
227 227 m = stringutil.forcebytestr(inst).split()[-1]
228 228 if m in "mpatch bdiff".split():
229 229 ui.error(_("(did you forget to compile extensions?)\n"))
230 230 elif m in "zlib".split():
231 231 ui.error(_("(is your Python install correct?)\n"))
232 232 except IOError as inst:
233 233 if util.safehasattr(inst, "code"):
234 234 ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst))
235 235 elif util.safehasattr(inst, "reason"):
236 236 try: # usually it is in the form (errno, strerror)
237 237 reason = inst.reason.args[1]
238 238 except (AttributeError, IndexError):
239 239 # it might be anything, for example a string
240 240 reason = inst.reason
241 241 if isinstance(reason, pycompat.unicode):
242 242 # SSLError of Python 2.7.9 contains a unicode
243 243 reason = encoding.unitolocal(reason)
244 244 ui.error(_("abort: error: %s\n") % reason)
245 245 elif (util.safehasattr(inst, "args")
246 246 and inst.args and inst.args[0] == errno.EPIPE):
247 247 pass
248 248 elif getattr(inst, "strerror", None):
249 249 if getattr(inst, "filename", None):
250 250 ui.error(_("abort: %s: %s\n") % (
251 251 encoding.strtolocal(inst.strerror),
252 252 stringutil.forcebytestr(inst.filename)))
253 253 else:
254 254 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
255 255 else:
256 256 raise
257 257 except OSError as inst:
258 258 if getattr(inst, "filename", None) is not None:
259 259 ui.error(_("abort: %s: '%s'\n") % (
260 260 encoding.strtolocal(inst.strerror),
261 261 stringutil.forcebytestr(inst.filename)))
262 262 else:
263 263 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
264 264 except MemoryError:
265 265 ui.error(_("abort: out of memory\n"))
266 266 except SystemExit as inst:
267 267 # Commands shouldn't sys.exit directly, but give a return code.
268 268 # Just in case, catch this and pass the exit code to the caller.
269 269 return inst.code
270 270 except socket.error as inst:
271 271 ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))
272 272
273 273 return -1
274 274
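# A minimal usage sketch for callcatch() above, assuming a hypothetical
# 'runcommand' entry point; the wrapped call returns func()'s result on
# success, 1 for InterventionRequired, and -1 for most handled errors:
#
#     def run(ui, runcommand):
#         return callcatch(ui, lambda: runcommand(ui))
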
275 275 def checknewlabel(repo, lbl, kind):
276 276 # Do not use the "kind" parameter in ui output.
277 277 # It makes strings difficult to translate.
278 278 if lbl in ['tip', '.', 'null']:
279 279 raise error.Abort(_("the name '%s' is reserved") % lbl)
280 280 for c in (':', '\0', '\n', '\r'):
281 281 if c in lbl:
282 282 raise error.Abort(
283 283 _("%r cannot be used in a name") % pycompat.bytestr(c))
284 284 try:
285 285 int(lbl)
286 286 raise error.Abort(_("cannot use an integer as a name"))
287 287 except ValueError:
288 288 pass
289 289 if lbl.strip() != lbl:
290 290 raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
291 291
292 292 def checkfilename(f):
293 293 '''Check that the filename f is an acceptable filename for a tracked file'''
294 294 if '\r' in f or '\n' in f:
295 295 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
296 296 % pycompat.bytestr(f))
297 297
298 298 def checkportable(ui, f):
299 299 '''Check if filename f is portable and warn or abort depending on config'''
300 300 checkfilename(f)
301 301 abort, warn = checkportabilityalert(ui)
302 302 if abort or warn:
303 303 msg = util.checkwinfilename(f)
304 304 if msg:
305 305 msg = "%s: %s" % (msg, procutil.shellquote(f))
306 306 if abort:
307 307 raise error.Abort(msg)
308 308 ui.warn(_("warning: %s\n") % msg)
309 309
310 310 def checkportabilityalert(ui):
311 311 '''check if the user's config requests nothing, a warning, or abort for
312 312 non-portable filenames'''
313 313 val = ui.config('ui', 'portablefilenames')
314 314 lval = val.lower()
315 315 bval = stringutil.parsebool(val)
316 316 abort = pycompat.iswindows or lval == 'abort'
317 317 warn = bval or lval == 'warn'
318 318 if bval is None and not (warn or abort or lval == 'ignore'):
319 319 raise error.ConfigError(
320 320 _("ui.portablefilenames value is invalid ('%s')") % val)
321 321 return abort, warn
322 322
323 323 class casecollisionauditor(object):
324 324 def __init__(self, ui, abort, dirstate):
325 325 self._ui = ui
326 326 self._abort = abort
327 327 allfiles = '\0'.join(dirstate._map)
328 328 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
329 329 self._dirstate = dirstate
330 330 # The purpose of _newfiles is so that we don't complain about
331 331 # case collisions if someone were to call this object with the
332 332 # same filename twice.
333 333 self._newfiles = set()
334 334
335 335 def __call__(self, f):
336 336 if f in self._newfiles:
337 337 return
338 338 fl = encoding.lower(f)
339 339 if fl in self._loweredfiles and f not in self._dirstate:
340 340 msg = _('possible case-folding collision for %s') % f
341 341 if self._abort:
342 342 raise error.Abort(msg)
343 343 self._ui.warn(_("warning: %s\n") % msg)
344 344 self._loweredfiles.add(fl)
345 345 self._newfiles.add(f)
346 346
347 347 def filteredhash(repo, maxrev):
348 348 """build hash of filtered revisions in the current repoview.
349 349
350 350 Multiple caches perform up-to-date validation by checking that the
351 351 tiprev and tipnode stored in the cache file match the current repository.
352 352 However, this is not sufficient for validating repoviews because the set
353 353 of revisions in the view may change without the repository tiprev and
354 354 tipnode changing.
355 355
356 356 This function hashes all the revs filtered from the view and returns
357 357 that SHA-1 digest.
358 358 """
359 359 cl = repo.changelog
360 360 if not cl.filteredrevs:
361 361 return None
362 362 key = None
363 363 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
364 364 if revs:
365 365 s = hashlib.sha1()
366 366 for rev in revs:
367 367 s.update('%d;' % rev)
368 368 key = s.digest()
369 369 return key
370 370
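# A hedged sketch of the validation pattern described in the docstring
# above: a cache is considered valid only if tiprev, tipnode and the
# filtered hash all still match (the cache* names are hypothetical):
#
#     def cachevalid(repo, cachetiprev, cachetipnode, cachehash):
#         cl = repo.changelog
#         return (cachetiprev == len(cl) - 1
#                 and cachetipnode == cl.node(cachetiprev)
#                 and cachehash == filteredhash(repo, cachetiprev))
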
371 371 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
372 372 '''yield every hg repository under path, always recursively.
373 373 The recurse flag only controls recursion into repo working dirs.'''
374 374 def errhandler(err):
375 375 if err.filename == path:
376 376 raise err
377 377 samestat = getattr(os.path, 'samestat', None)
378 378 if followsym and samestat is not None:
379 379 def adddir(dirlst, dirname):
380 380 dirstat = os.stat(dirname)
381 381 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
382 382 if not match:
383 383 dirlst.append(dirstat)
384 384 return not match
385 385 else:
386 386 followsym = False
387 387
388 388 if (seen_dirs is None) and followsym:
389 389 seen_dirs = []
390 390 adddir(seen_dirs, path)
391 391 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
392 392 dirs.sort()
393 393 if '.hg' in dirs:
394 394 yield root # found a repository
395 395 qroot = os.path.join(root, '.hg', 'patches')
396 396 if os.path.isdir(os.path.join(qroot, '.hg')):
397 397 yield qroot # we have a patch queue repo here
398 398 if recurse:
399 399 # avoid recursing inside the .hg directory
400 400 dirs.remove('.hg')
401 401 else:
402 402 dirs[:] = [] # don't descend further
403 403 elif followsym:
404 404 newdirs = []
405 405 for d in dirs:
406 406 fname = os.path.join(root, d)
407 407 if adddir(seen_dirs, fname):
408 408 if os.path.islink(fname):
409 409 for hgname in walkrepos(fname, True, seen_dirs):
410 410 yield hgname
411 411 else:
412 412 newdirs.append(d)
413 413 dirs[:] = newdirs
414 414
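# Example use of walkrepos(), with a hypothetical starting path; it yields
# every repository root found, including patch queue repos under
# .hg/patches:
#
#     roots = list(walkrepos('/srv/repos', followsym=True))
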
415 415 def binnode(ctx):
416 416 """Return binary node id for a given basectx"""
417 417 node = ctx.node()
418 418 if node is None:
419 419 return wdirid
420 420 return node
421 421
422 422 def intrev(ctx):
423 423 """Return integer for a given basectx that can be used in comparison or
424 424 arithmetic operations"""
425 425 rev = ctx.rev()
426 426 if rev is None:
427 427 return wdirrev
428 428 return rev
429 429
430 430 def formatchangeid(ctx):
431 431 """Format changectx as '{rev}:{node|formatnode}', which is the default
432 432 template provided by logcmdutil.changesettemplater"""
433 433 repo = ctx.repo()
434 434 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
435 435
436 436 def formatrevnode(ui, rev, node):
437 437 """Format given revision and node depending on the current verbosity"""
438 438 if ui.debugflag:
439 439 hexfunc = hex
440 440 else:
441 441 hexfunc = short
442 442 return '%d:%s' % (rev, hexfunc(node))
443 443
444 444 def resolvehexnodeidprefix(repo, prefix):
445 445 if (prefix.startswith('x') and
446 446 repo.ui.configbool('experimental', 'revisions.prefixhexnode')):
447 447 prefix = prefix[1:]
448 448 try:
449 449 # Uses unfiltered repo because it's faster when prefix is ambiguous.
450 450 # This matches the shortesthexnodeidprefix() function below.
451 451 node = repo.unfiltered().changelog._partialmatch(prefix)
452 452 except error.AmbiguousPrefixLookupError:
453 453 revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
454 454 if revset:
455 455 # Clear config to avoid infinite recursion
456 456 configoverrides = {('experimental',
457 457 'revisions.disambiguatewithin'): None}
458 458 with repo.ui.configoverride(configoverrides):
459 459 revs = repo.anyrevs([revset], user=True)
460 460 matches = []
461 461 for rev in revs:
462 462 node = repo.changelog.node(rev)
463 463 if hex(node).startswith(prefix):
464 464 matches.append(node)
465 465 if len(matches) == 1:
466 466 return matches[0]
467 467 raise
468 468 if node is None:
469 469 return
470 470 repo.changelog.rev(node) # make sure node isn't filtered
471 471 return node
472 472
473 473 def mayberevnum(repo, prefix):
474 474 """Checks if the given prefix may be mistaken for a revision number"""
475 475 try:
476 476 i = int(prefix)
477 477 # if we are a pure int, then starting with zero will not be
478 478 # confused with a rev; nor, obviously, will an int larger
479 479 # than the value of the tip rev
480 480 if prefix[0:1] == b'0' or i >= len(repo):
481 481 return False
482 482 return True
483 483 except ValueError:
484 484 return False
485 485
486 486 def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
487 487 """Find the shortest unambiguous prefix that matches hexnode.
488 488
489 489 If "cache" is not None, it must be a dictionary that can be used for
490 490 caching between calls to this method.
491 491 """
492 492 # _partialmatch() of filtered changelog could take O(len(repo)) time,
493 493 # which would be unacceptably slow. So we look for hash collisions in
494 494 # unfiltered space, which means some hashes may be slightly longer.
495 495
496 496 def disambiguate(prefix):
497 497 """Disambiguate against revnums."""
498 498 if repo.ui.configbool('experimental', 'revisions.prefixhexnode'):
499 499 if mayberevnum(repo, prefix):
500 500 return 'x' + prefix
501 501 else:
502 502 return prefix
503 503
504 504 hexnode = hex(node)
505 505 for length in range(len(prefix), len(hexnode) + 1):
506 506 prefix = hexnode[:length]
507 507 if not mayberevnum(repo, prefix):
508 508 return prefix
509 509
510 510 cl = repo.unfiltered().changelog
511 511 revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
512 512 if revset:
513 513 revs = None
514 514 if cache is not None:
515 515 revs = cache.get('disambiguationrevset')
516 516 if revs is None:
517 517 revs = repo.anyrevs([revset], user=True)
518 518 if cache is not None:
519 519 cache['disambiguationrevset'] = revs
520 520 if cl.rev(node) in revs:
521 521 hexnode = hex(node)
522 522 nodetree = None
523 523 if cache is not None:
524 524 nodetree = cache.get('disambiguationnodetree')
525 525 if not nodetree:
526 526 try:
527 527 nodetree = parsers.nodetree(cl.index, len(revs))
528 528 except AttributeError:
529 529 # no native nodetree
530 530 pass
531 531 else:
532 532 for r in revs:
533 533 nodetree.insert(r)
534 534 if cache is not None:
535 535 cache['disambiguationnodetree'] = nodetree
536 536 if nodetree is not None:
537 537 length = max(nodetree.shortest(node), minlength)
538 538 prefix = hexnode[:length]
539 539 return disambiguate(prefix)
540 540 for length in range(minlength, len(hexnode) + 1):
541 541 matches = []
542 542 prefix = hexnode[:length]
543 543 for rev in revs:
544 544 otherhexnode = repo[rev].hex()
545 545 if prefix == otherhexnode[:length]:
546 546 matches.append(otherhexnode)
547 547 if len(matches) == 1:
548 548 return disambiguate(prefix)
549 549
550 550 try:
551 551 return disambiguate(cl.shortest(node, minlength))
552 552 except error.LookupError:
553 553 raise error.RepoLookupError()
554 554
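# A minimal usage sketch; passing the same dict as 'cache' across calls
# lets the disambiguation revset and nodetree be computed only once
# ('nodes' is a hypothetical list of binary node ids):
#
#     cache = {}
#     for node in nodes:
#         prefix = shortesthexnodeidprefix(repo, node, minlength=4,
#                                          cache=cache)
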
555 555 def isrevsymbol(repo, symbol):
556 556 """Checks if a symbol exists in the repo.
557 557
558 558 See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
559 559 symbol is an ambiguous nodeid prefix.
560 560 """
561 561 try:
562 562 revsymbol(repo, symbol)
563 563 return True
564 564 except error.RepoLookupError:
565 565 return False
566 566
567 567 def revsymbol(repo, symbol):
568 568 """Returns a context given a single revision symbol (as string).
569 569
570 570 This is similar to revsingle(), but accepts only a single revision symbol,
571 571 i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
572 572 not "max(public())".
573 573 """
574 574 if not isinstance(symbol, bytes):
575 575 msg = ("symbol (%s of type %s) was not a string, did you mean "
576 576 "repo[symbol]?" % (symbol, type(symbol)))
577 577 raise error.ProgrammingError(msg)
578 578 try:
579 579 if symbol in ('.', 'tip', 'null'):
580 580 return repo[symbol]
581 581
582 582 try:
583 583 r = int(symbol)
584 584 if '%d' % r != symbol:
585 585 raise ValueError
586 586 l = len(repo.changelog)
587 587 if r < 0:
588 588 r += l
589 589 if r < 0 or r >= l and r != wdirrev:
590 590 raise ValueError
591 591 return repo[r]
592 592 except error.FilteredIndexError:
593 593 raise
594 594 except (ValueError, OverflowError, IndexError):
595 595 pass
596 596
597 597 if len(symbol) == 40:
598 598 try:
599 599 node = bin(symbol)
600 600 rev = repo.changelog.rev(node)
601 601 return repo[rev]
602 602 except error.FilteredLookupError:
603 603 raise
604 604 except (TypeError, LookupError):
605 605 pass
606 606
607 607 # look up bookmarks through the name interface
608 608 try:
609 609 node = repo.names.singlenode(repo, symbol)
610 610 rev = repo.changelog.rev(node)
611 611 return repo[rev]
612 612 except KeyError:
613 613 pass
614 614
615 615 node = resolvehexnodeidprefix(repo, symbol)
616 616 if node is not None:
617 617 rev = repo.changelog.rev(node)
618 618 return repo[rev]
619 619
620 620 raise error.RepoLookupError(_("unknown revision '%s'") % symbol)
621 621
622 622 except error.WdirUnsupported:
623 623 return repo[None]
624 624 except (error.FilteredIndexError, error.FilteredLookupError,
625 625 error.FilteredRepoLookupError):
626 626 raise _filterederror(repo, symbol)
627 627
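# A usage sketch for the two lookups above; revsymbol() takes a single
# symbol (bytes), never a revset expression:
#
#     if isrevsymbol(repo, 'my-bookmark'):
#         ctx = revsymbol(repo, 'my-bookmark')
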
628 628 def _filterederror(repo, changeid):
629 629 """build an exception to be raised about a filtered changeid
630 630
631 631 This is extracted into a function to help extensions (eg: evolve) to
632 632 experiment with various message variants."""
633 633 if repo.filtername.startswith('visible'):
634 634
635 635 # Check if the changeset is obsolete
636 636 unfilteredrepo = repo.unfiltered()
637 637 ctx = revsymbol(unfilteredrepo, changeid)
638 638
639 639 # If the changeset is obsolete, enrich the message with the reason
640 640 # that made this changeset not visible
641 641 if ctx.obsolete():
642 642 msg = obsutil._getfilteredreason(repo, changeid, ctx)
643 643 else:
644 644 msg = _("hidden revision '%s'") % changeid
645 645
646 646 hint = _('use --hidden to access hidden revisions')
647 647
648 648 return error.FilteredRepoLookupError(msg, hint=hint)
649 649 msg = _("filtered revision '%s' (not in '%s' subset)")
650 650 msg %= (changeid, repo.filtername)
651 651 return error.FilteredRepoLookupError(msg)
652 652
653 653 def revsingle(repo, revspec, default='.', localalias=None):
654 654 if not revspec and revspec != 0:
655 655 return repo[default]
656 656
657 657 l = revrange(repo, [revspec], localalias=localalias)
658 658 if not l:
659 659 raise error.Abort(_('empty revision set'))
660 660 return repo[l.last()]
661 661
662 662 def _pairspec(revspec):
663 663 tree = revsetlang.parse(revspec)
664 664 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
665 665
666 666 def revpair(repo, revs):
667 667 if not revs:
668 668 return repo['.'], repo[None]
669 669
670 670 l = revrange(repo, revs)
671 671
672 672 if not l:
673 673 first = second = None
674 674 elif l.isascending():
675 675 first = l.min()
676 676 second = l.max()
677 677 elif l.isdescending():
678 678 first = l.max()
679 679 second = l.min()
680 680 else:
681 681 first = l.first()
682 682 second = l.last()
683 683
684 684 if first is None:
685 685 raise error.Abort(_('empty revision range'))
686 686 if (first == second and len(revs) >= 2
687 687 and not all(revrange(repo, [r]) for r in revs)):
688 688 raise error.Abort(_('empty revision on one side of range'))
689 689
690 690 # if top-level is range expression, the result must always be a pair
691 691 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
692 692 return repo[first], repo[None]
693 693
694 694 return repo[first], repo[second]
695 695
696 696 def revrange(repo, specs, localalias=None):
697 697 """Execute 1 to many revsets and return the union.
698 698
699 699 This is the preferred mechanism for executing revsets using user-specified
700 700 config options, such as revset aliases.
701 701
702 702 The revsets specified by ``specs`` will be executed via a chained ``OR``
703 703 expression. If ``specs`` is empty, an empty result is returned.
704 704
705 705 ``specs`` can contain integers, in which case they are assumed to be
706 706 revision numbers.
707 707
708 708 It is assumed the revsets are already formatted. If you have arguments
709 709 that need to be expanded in the revset, call ``revsetlang.formatspec()``
710 710 and pass the result as an element of ``specs``.
711 711
712 712 Specifying a single revset is allowed.
713 713
714 714 Returns a ``revset.abstractsmartset`` which is a list-like interface over
715 715 integer revisions.
716 716 """
717 717 allspecs = []
718 718 for spec in specs:
719 719 if isinstance(spec, int):
720 720 spec = revsetlang.formatspec('rev(%d)', spec)
721 721 allspecs.append(spec)
722 722 return repo.anyrevs(allspecs, user=True, localalias=localalias)
723 723
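# A minimal sketch of the formatspec() + revrange() pattern recommended in
# the docstring above, with a hypothetical user-supplied branch name:
#
#     spec = revsetlang.formatspec('branch(%s)', branchname)
#     revs = revrange(repo, [spec, 'bookmark()'])  # union of both revsets
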
724 724 def meaningfulparents(repo, ctx):
725 725 """Return list of meaningful (or all if debug) parentrevs for rev.
726 726
727 727 For merges (two non-nullrev revisions) both parents are meaningful.
728 728 Otherwise the first parent revision is considered meaningful if it
729 729 is not the preceding revision.
730 730 """
731 731 parents = ctx.parents()
732 732 if len(parents) > 1:
733 733 return parents
734 734 if repo.ui.debugflag:
735 735 return [parents[0], repo[nullrev]]
736 736 if parents[0].rev() >= intrev(ctx) - 1:
737 737 return []
738 738 return parents
739 739
740 740 def expandpats(pats):
741 741 '''Expand bare globs when running on Windows.
742 742 On POSIX we assume it has already been done by the shell.'''
743 743 if not util.expandglobs:
744 744 return list(pats)
745 745 ret = []
746 746 for kindpat in pats:
747 747 kind, pat = matchmod._patsplit(kindpat, None)
748 748 if kind is None:
749 749 try:
750 750 globbed = glob.glob(pat)
751 751 except re.error:
752 752 globbed = [pat]
753 753 if globbed:
754 754 ret.extend(globbed)
755 755 continue
756 756 ret.append(kindpat)
757 757 return ret
758 758
759 759 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
760 760 badfn=None):
761 761 '''Return a matcher and the patterns that were used.
762 762 The matcher will warn about bad matches, unless an alternate badfn callback
763 763 is provided.'''
764 764 if pats == ("",):
765 765 pats = []
766 766 if opts is None:
767 767 opts = {}
768 768 if not globbed and default == 'relpath':
769 769 pats = expandpats(pats or [])
770 770
771 771 def bad(f, msg):
772 772 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
773 773
774 774 if badfn is None:
775 775 badfn = bad
776 776
777 777 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
778 778 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
779 779
780 780 if m.always():
781 781 pats = []
782 782 return m, pats
783 783
784 784 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
785 785 badfn=None):
786 786 '''Return a matcher that will warn about bad matches.'''
787 787 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
788 788
789 789 def matchall(repo):
790 790 '''Return a matcher that will efficiently match everything.'''
791 791 return matchmod.always(repo.root, repo.getcwd())
792 792
793 793 def matchfiles(repo, files, badfn=None):
794 794 '''Return a matcher that will efficiently match exactly these files.'''
795 795 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
796 796
797 797 def parsefollowlinespattern(repo, rev, pat, msg):
798 798 """Return a file name from `pat` pattern suitable for usage in followlines
799 799 logic.
800 800 """
801 801 if not matchmod.patkind(pat):
802 802 return pathutil.canonpath(repo.root, repo.getcwd(), pat)
803 803 else:
804 804 ctx = repo[rev]
805 805 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
806 806 files = [f for f in ctx if m(f)]
807 807 if len(files) != 1:
808 808 raise error.ParseError(msg)
809 809 return files[0]
810 810
811 811 def origpath(ui, repo, filepath):
812 812 '''customize where .orig files are created
813 813
814 814 Fetch user defined path from config file: [ui] origbackuppath = <path>
815 815 Fall back to default (filepath with .orig suffix) if not specified
816 816 '''
817 817 origbackuppath = ui.config('ui', 'origbackuppath')
818 818 if not origbackuppath:
819 819 return filepath + ".orig"
820 820
821 821 # Convert filepath from an absolute path into a path inside the repo.
822 822 filepathfromroot = util.normpath(os.path.relpath(filepath,
823 823 start=repo.root))
824 824
825 825 origvfs = vfs.vfs(repo.wjoin(origbackuppath))
826 826 origbackupdir = origvfs.dirname(filepathfromroot)
827 827 if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
828 828 ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))
829 829
830 830 # Remove any files that conflict with the backup file's path
831 831 for f in reversed(list(util.finddirs(filepathfromroot))):
832 832 if origvfs.isfileorlink(f):
833 833 ui.note(_('removing conflicting file: %s\n')
834 834 % origvfs.join(f))
835 835 origvfs.unlink(f)
836 836 break
837 837
838 838 origvfs.makedirs(origbackupdir)
839 839
840 840 if origvfs.isdir(filepathfromroot) and not origvfs.islink(filepathfromroot):
841 841 ui.note(_('removing conflicting directory: %s\n')
842 842 % origvfs.join(filepathfromroot))
843 843 origvfs.rmtree(filepathfromroot, forcibly=True)
844 844
845 845 return origvfs.join(filepathfromroot)
846 846
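# With an hgrc like the illustrative snippet below, origpath() maps backup
# files into the configured directory instead of creating in-tree .orig
# files:
#
#     [ui]
#     origbackuppath = .hg/origbackups
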
847 847 class _containsnode(object):
848 848 """proxy __contains__(node) to container.__contains__ which accepts revs"""
849 849
850 850 def __init__(self, repo, revcontainer):
851 851 self._torev = repo.changelog.rev
852 852 self._revcontains = revcontainer.__contains__
853 853
854 854 def __contains__(self, node):
855 855 return self._revcontains(self._torev(node))
856 856
857 857 def cleanupnodes(repo, replacements, operation, moves=None, metadata=None,
858 858 fixphase=False, targetphase=None, backup=True):
859 859 """do common cleanups when old nodes are replaced by new nodes
860 860
861 861 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
862 862 (we might also want to move working directory parent in the future)
863 863
864 864 By default, bookmark moves are calculated automatically from 'replacements',
865 865 but 'moves' can be used to override that. Also, 'moves' may include
866 866 additional bookmark moves that should not have associated obsmarkers.
867 867
868 868 replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
869 869 have replacements. operation is a string, like "rebase".
870 870
871 871 metadata is a dictionary containing metadata to be stored in obsmarkers if
872 872 obsolescence is enabled.
873 873 """
874 874 assert fixphase or targetphase is None
875 875 if not replacements and not moves:
876 876 return
877 877
878 878 # translate mapping's other forms
879 879 if not util.safehasattr(replacements, 'items'):
880 880 replacements = {(n,): () for n in replacements}
881 881 else:
882 882 # upgrading non-tuple "source" keys to tuples for BC
883 883 repls = {}
884 884 for key, value in replacements.items():
885 885 if not isinstance(key, tuple):
886 886 key = (key,)
887 887 repls[key] = value
888 888 replacements = repls
889 889
890 890 # Calculate bookmark movements
891 891 if moves is None:
892 892 moves = {}
893 893 # Unfiltered repo is needed since nodes in replacements might be hidden.
894 894 unfi = repo.unfiltered()
895 895 for oldnodes, newnodes in replacements.items():
896 896 for oldnode in oldnodes:
897 897 if oldnode in moves:
898 898 continue
899 899 if len(newnodes) > 1:
900 900 # usually a split, take the one with biggest rev number
901 901 newnode = next(unfi.set('max(%ln)', newnodes)).node()
902 902 elif len(newnodes) == 0:
903 903 # move bookmark backwards
904 904 allreplaced = []
905 905 for rep in replacements:
906 906 allreplaced.extend(rep)
907 907 roots = list(unfi.set('max((::%n) - %ln)', oldnode,
908 908 allreplaced))
909 909 if roots:
910 910 newnode = roots[0].node()
911 911 else:
912 912 newnode = nullid
913 913 else:
914 914 newnode = newnodes[0]
915 915 moves[oldnode] = newnode
916 916
917 917 allnewnodes = [n for ns in replacements.values() for n in ns]
918 918 toretract = {}
919 919 toadvance = {}
920 920 if fixphase:
921 921 precursors = {}
922 922 for oldnodes, newnodes in replacements.items():
923 923 for oldnode in oldnodes:
924 924 for newnode in newnodes:
925 925 precursors.setdefault(newnode, []).append(oldnode)
926 926
927 927 allnewnodes.sort(key=lambda n: unfi[n].rev())
928 928 newphases = {}
929 929 def phase(ctx):
930 930 return newphases.get(ctx.node(), ctx.phase())
931 931 for newnode in allnewnodes:
932 932 ctx = unfi[newnode]
933 933 parentphase = max(phase(p) for p in ctx.parents())
934 934 if targetphase is None:
935 935 oldphase = max(unfi[oldnode].phase()
936 936 for oldnode in precursors[newnode])
937 937 newphase = max(oldphase, parentphase)
938 938 else:
939 939 newphase = max(targetphase, parentphase)
940 940 newphases[newnode] = newphase
941 941 if newphase > ctx.phase():
942 942 toretract.setdefault(newphase, []).append(newnode)
943 943 elif newphase < ctx.phase():
944 944 toadvance.setdefault(newphase, []).append(newnode)
945 945
946 946 with repo.transaction('cleanup') as tr:
947 947 # Move bookmarks
948 948 bmarks = repo._bookmarks
949 949 bmarkchanges = []
950 950 for oldnode, newnode in moves.items():
951 951 oldbmarks = repo.nodebookmarks(oldnode)
952 952 if not oldbmarks:
953 953 continue
954 954 from . import bookmarks # avoid import cycle
955 955 repo.ui.debug('moving bookmarks %r from %s to %s\n' %
956 956 (pycompat.rapply(pycompat.maybebytestr, oldbmarks),
957 957 hex(oldnode), hex(newnode)))
958 958 # Delete divergent bookmarks being parents of related newnodes
959 959 deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
960 960 allnewnodes, newnode, oldnode)
961 961 deletenodes = _containsnode(repo, deleterevs)
962 962 for name in oldbmarks:
963 963 bmarkchanges.append((name, newnode))
964 964 for b in bookmarks.divergent2delete(repo, deletenodes, name):
965 965 bmarkchanges.append((b, None))
966 966
967 967 if bmarkchanges:
968 968 bmarks.applychanges(repo, tr, bmarkchanges)
969 969
970 970 for phase, nodes in toretract.items():
971 971 phases.retractboundary(repo, tr, phase, nodes)
972 972 for phase, nodes in toadvance.items():
973 973 phases.advanceboundary(repo, tr, phase, nodes)
974 974
975 975 # Obsolete or strip nodes
976 976 if obsolete.isenabled(repo, obsolete.createmarkersopt):
977 977 # If a node is already obsoleted, and we want to obsolete it
978 978 # without a successor, skip that obsolete request since it's
979 979 # unnecessary. That's the "if s or not isobs(n)" check below.
980 980 # Also sort the nodes in topological order, which might be useful for
981 981 # some obsstore logic.
982 982 # NOTE: the filtering and sorting might belong to createmarkers.
983 983 isobs = unfi.obsstore.successors.__contains__
984 984 torev = unfi.changelog.rev
985 985 sortfunc = lambda ns: torev(ns[0][0])
986 986 rels = []
987 987 for ns, s in sorted(replacements.items(), key=sortfunc):
988 988 for n in ns:
989 989 if s or not isobs(n):
990 990 rel = (unfi[n], tuple(unfi[m] for m in s))
991 991 rels.append(rel)
992 992 if rels:
993 993 obsolete.createmarkers(repo, rels, operation=operation,
994 994 metadata=metadata)
995 995 else:
996 996 from . import repair # avoid import cycle
997 997 tostrip = list(n for ns in replacements for n in ns)
998 998 if tostrip:
999 999 repair.delayedstrip(repo.ui, repo, tostrip, operation,
1000 1000 backup=backup)
1001 1001
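# A hedged sketch of a cleanupnodes() call as a history-rewriting command
# might issue it ('old' and 'new' are hypothetical binary nodes); with
# obsolescence enabled this writes markers, otherwise the replaced nodes
# are stripped via repair.delayedstrip():
#
#     cleanupnodes(repo, {(old,): (new,)}, 'amend', fixphase=True)
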
1002 1002 def addremove(repo, matcher, prefix, opts=None):
1003 1003 if opts is None:
1004 1004 opts = {}
1005 1005 m = matcher
1006 1006 dry_run = opts.get('dry_run')
1007 1007 try:
1008 1008 similarity = float(opts.get('similarity') or 0)
1009 1009 except ValueError:
1010 1010 raise error.Abort(_('similarity must be a number'))
1011 1011 if similarity < 0 or similarity > 100:
1012 1012 raise error.Abort(_('similarity must be between 0 and 100'))
1013 1013 similarity /= 100.0
1014 1014
1015 1015 ret = 0
1016 1016 join = lambda f: os.path.join(prefix, f)
1017 1017
1018 1018 wctx = repo[None]
1019 1019 for subpath in sorted(wctx.substate):
1020 1020 submatch = matchmod.subdirmatcher(subpath, m)
1021 1021 if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
1022 1022 sub = wctx.sub(subpath)
1023 1023 try:
1024 1024 if sub.addremove(submatch, prefix, opts):
1025 1025 ret = 1
1026 1026 except error.LookupError:
1027 1027 repo.ui.status(_("skipping missing subrepository: %s\n")
1028 1028 % join(subpath))
1029 1029
1030 1030 rejected = []
1031 1031 def badfn(f, msg):
1032 1032 if f in m.files():
1033 1033 m.bad(f, msg)
1034 1034 rejected.append(f)
1035 1035
1036 1036 badmatch = matchmod.badmatch(m, badfn)
1037 1037 added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
1038 1038 badmatch)
1039 1039
1040 1040 unknownset = set(unknown + forgotten)
1041 1041 toprint = unknownset.copy()
1042 1042 toprint.update(deleted)
1043 1043 for abs in sorted(toprint):
1044 1044 if repo.ui.verbose or not m.exact(abs):
1045 1045 if abs in unknownset:
1046 1046 status = _('adding %s\n') % m.uipath(abs)
1047 1047 label = 'addremove.added'
1048 1048 else:
1049 1049 status = _('removing %s\n') % m.uipath(abs)
1050 1050 label = 'addremove.removed'
1051 1051 repo.ui.status(status, label=label)
1052 1052
1053 1053 renames = _findrenames(repo, m, added + unknown, removed + deleted,
1054 1054 similarity)
1055 1055
1056 1056 if not dry_run:
1057 1057 _markchanges(repo, unknown + forgotten, deleted, renames)
1058 1058
1059 1059 for f in rejected:
1060 1060 if f in m.files():
1061 1061 return 1
1062 1062 return ret
1063 1063
1064 1064 def marktouched(repo, files, similarity=0.0):
1065 1065 '''Assert that files have somehow been operated upon. The files are
1066 1066 relative to the repo root.'''
1067 1067 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
1068 1068 rejected = []
1069 1069
1070 1070 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
1071 1071
1072 1072 if repo.ui.verbose:
1073 1073 unknownset = set(unknown + forgotten)
1074 1074 toprint = unknownset.copy()
1075 1075 toprint.update(deleted)
1076 1076 for abs in sorted(toprint):
1077 1077 if abs in unknownset:
1078 1078 status = _('adding %s\n') % abs
1079 1079 else:
1080 1080 status = _('removing %s\n') % abs
1081 1081 repo.ui.status(status)
1082 1082
1083 1083 renames = _findrenames(repo, m, added + unknown, removed + deleted,
1084 1084 similarity)
1085 1085
1086 1086 _markchanges(repo, unknown + forgotten, deleted, renames)
1087 1087
1088 1088 for f in rejected:
1089 1089 if f in m.files():
1090 1090 return 1
1091 1091 return 0
1092 1092
1093 1093 def _interestingfiles(repo, matcher):
1094 1094 '''Walk dirstate with matcher, looking for files that addremove would care
1095 1095 about.
1096 1096
1097 1097 This is different from dirstate.status because it doesn't care about
1098 1098 whether files are modified or clean.'''
1099 1099 added, unknown, deleted, removed, forgotten = [], [], [], [], []
1100 1100 audit_path = pathutil.pathauditor(repo.root, cached=True)
1101 1101
1102 1102 ctx = repo[None]
1103 1103 dirstate = repo.dirstate
1104 1104 walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
1105 1105 unknown=True, ignored=False, full=False)
1106 1106 for abs, st in walkresults.iteritems():
1107 1107 dstate = dirstate[abs]
1108 1108 if dstate == '?' and audit_path.check(abs):
1109 1109 unknown.append(abs)
1110 1110 elif dstate != 'r' and not st:
1111 1111 deleted.append(abs)
1112 1112 elif dstate == 'r' and st:
1113 1113 forgotten.append(abs)
1114 1114 # for finding renames
1115 1115 elif dstate == 'r' and not st:
1116 1116 removed.append(abs)
1117 1117 elif dstate == 'a':
1118 1118 added.append(abs)
1119 1119
1120 1120 return added, unknown, deleted, removed, forgotten
1121 1121
1122 1122 def _findrenames(repo, matcher, added, removed, similarity):
1123 1123 '''Find renames from removed files to added ones.'''
1124 1124 renames = {}
1125 1125 if similarity > 0:
1126 1126 for old, new, score in similar.findrenames(repo, added, removed,
1127 1127 similarity):
1128 1128 if (repo.ui.verbose or not matcher.exact(old)
1129 1129 or not matcher.exact(new)):
1130 1130 repo.ui.status(_('recording removal of %s as rename to %s '
1131 1131 '(%d%% similar)\n') %
1132 1132 (matcher.rel(old), matcher.rel(new),
1133 1133 score * 100))
1134 1134 renames[new] = old
1135 1135 return renames
1136 1136
1137 1137 def _markchanges(repo, unknown, deleted, renames):
1138 1138 '''Marks the files in unknown as added, the files in deleted as removed,
1139 1139 and the files in renames as copied.'''
1140 1140 wctx = repo[None]
1141 1141 with repo.wlock():
1142 1142 wctx.forget(deleted)
1143 1143 wctx.add(unknown)
1144 1144 for new, old in renames.iteritems():
1145 1145 wctx.copy(old, new)
1146 1146
1147 1147 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1148 1148 """Update the dirstate to reflect the intent of copying src to dst. For
1149 1149 different reasons it might not end with dst being marked as copied from src.
1150 1150 """
1151 1151 origsrc = repo.dirstate.copied(src) or src
1152 1152 if dst == origsrc: # copying back a copy?
1153 1153 if repo.dirstate[dst] not in 'mn' and not dryrun:
1154 1154 repo.dirstate.normallookup(dst)
1155 1155 else:
1156 1156 if repo.dirstate[origsrc] == 'a' and origsrc == src:
1157 1157 if not ui.quiet:
1158 1158 ui.warn(_("%s has not been committed yet, so no copy "
1159 1159 "data will be stored for %s.\n")
1160 1160 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
1161 1161 if repo.dirstate[dst] in '?r' and not dryrun:
1162 1162 wctx.add([dst])
1163 1163 elif not dryrun:
1164 1164 wctx.copy(origsrc, dst)
1165 1165
1166 1166 def writerequires(opener, requirements):
1167 1167 with opener('requires', 'w') as fp:
1168 1168 for r in sorted(requirements):
1169 1169 fp.write("%s\n" % r)
1170 1170
1171 1171 class filecachesubentry(object):
1172 1172 def __init__(self, path, stat):
1173 1173 self.path = path
1174 1174 self.cachestat = None
1175 1175 self._cacheable = None
1176 1176
1177 1177 if stat:
1178 1178 self.cachestat = filecachesubentry.stat(self.path)
1179 1179
1180 1180 if self.cachestat:
1181 1181 self._cacheable = self.cachestat.cacheable()
1182 1182 else:
1183 1183 # None means we don't know yet
1184 1184 self._cacheable = None
1185 1185
1186 1186 def refresh(self):
1187 1187 if self.cacheable():
1188 1188 self.cachestat = filecachesubentry.stat(self.path)
1189 1189
1190 1190 def cacheable(self):
1191 1191 if self._cacheable is not None:
1192 1192 return self._cacheable
1193 1193
1194 1194 # we don't know yet, assume it is for now
1195 1195 return True
1196 1196
1197 1197 def changed(self):
1198 1198 # no point in going further if we can't cache it
1199 1199 if not self.cacheable():
1200 1200 return True
1201 1201
1202 1202 newstat = filecachesubentry.stat(self.path)
1203 1203
1204 1204 # we may not know if it's cacheable yet, check again now
1205 1205 if newstat and self._cacheable is None:
1206 1206 self._cacheable = newstat.cacheable()
1207 1207
1208 1208 # check again
1209 1209 if not self._cacheable:
1210 1210 return True
1211 1211
1212 1212 if self.cachestat != newstat:
1213 1213 self.cachestat = newstat
1214 1214 return True
1215 1215 else:
1216 1216 return False
1217 1217
1218 1218 @staticmethod
1219 1219 def stat(path):
1220 1220 try:
1221 1221 return util.cachestat(path)
1222 1222 except OSError as e:
1223 1223 if e.errno != errno.ENOENT:
1224 1224 raise
1225 1225
1226 1226 class filecacheentry(object):
1227 1227 def __init__(self, paths, stat=True):
1228 1228 self._entries = []
1229 1229 for path in paths:
1230 1230 self._entries.append(filecachesubentry(path, stat))
1231 1231
1232 1232 def changed(self):
1233 1233 '''true if any entry has changed'''
1234 1234 for entry in self._entries:
1235 1235 if entry.changed():
1236 1236 return True
1237 1237 return False
1238 1238
1239 1239 def refresh(self):
1240 1240 for entry in self._entries:
1241 1241 entry.refresh()
1242 1242
1243 1243 class filecache(object):
1244 1244 """A property like decorator that tracks files under .hg/ for updates.
1245 1245
1246 1246 On first access, the files defined as arguments are stat()ed and the
1247 1247 results cached. The decorated function is called. The results are stashed
1248 1248 away in a ``_filecache`` dict on the object whose method is decorated.
1249 1249
1250 1250 On subsequent access, the cached result is returned.
1251 1251
1252 1252 On external property set operations, stat() calls are performed and the new
1253 1253 value is cached.
1254 1254
1255 1255 On property delete operations, cached data is removed.
1256 1256
1257 1257 When using the property API, cached data is always returned, if available:
1258 1258 no stat() is performed to check if the file has changed and if the function
1259 1259 needs to be called to reflect file changes.
1260 1260
1261 1261 Others can muck about with the state of the ``_filecache`` dict. e.g. they
1262 1262 can populate an entry before the property's getter is called. In this case,
1263 1263 entries in ``_filecache`` will be used during property operations,
1264 1264 if available. If the underlying file changes, it is up to external callers
1265 1265 to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
1266 1266 method result as well as possibly calling ``del obj._filecache[attr]`` to
1267 1267 remove the ``filecacheentry``.
1268 1268 """
1269 1269
1270 1270 def __init__(self, *paths):
1271 1271 self.paths = paths
1272 1272
1273 1273 def join(self, obj, fname):
1274 1274 """Used to compute the runtime path of a cached file.
1275 1275
1276 1276 Users should subclass filecache and provide their own version of this
1277 1277 function to call the appropriate join function on 'obj' (an instance
1278 1278 of the class whose member function was decorated).
1279 1279 """
1280 1280 raise NotImplementedError
1281 1281
1282 1282 def __call__(self, func):
1283 1283 self.func = func
1284 1284 self.sname = func.__name__
1285 1285 self.name = pycompat.sysbytes(self.sname)
1286 1286 return self
1287 1287
1288 1288 def __get__(self, obj, type=None):
1289 1289 # if accessed on the class, return the descriptor itself.
1290 1290 if obj is None:
1291 1291 return self
1292 1292 # do we need to check if the file changed?
1293 1293 if self.sname in obj.__dict__:
1294 1294 assert self.name in obj._filecache, self.name
1295 1295 return obj.__dict__[self.sname]
1296 1296
1297 1297 entry = obj._filecache.get(self.name)
1298 1298
1299 1299 if entry:
1300 1300 if entry.changed():
1301 1301 entry.obj = self.func(obj)
1302 1302 else:
1303 1303 paths = [self.join(obj, path) for path in self.paths]
1304 1304
1305 1305 # We stat -before- creating the object so our cache doesn't lie if
1306 1306 # a writer modified the file between the time we read and stat it
1307 1307 entry = filecacheentry(paths, True)
1308 1308 entry.obj = self.func(obj)
1309 1309
1310 1310 obj._filecache[self.name] = entry
1311 1311
1312 1312 obj.__dict__[self.sname] = entry.obj
1313 1313 return entry.obj
1314 1314
1315 1315 def __set__(self, obj, value):
1316 1316 if self.name not in obj._filecache:
1317 1317 # we add an entry for the missing value because X in __dict__
1318 1318 # implies X in _filecache
1319 1319 paths = [self.join(obj, path) for path in self.paths]
1320 1320 ce = filecacheentry(paths, False)
1321 1321 obj._filecache[self.name] = ce
1322 1322 else:
1323 1323 ce = obj._filecache[self.name]
1324 1324
1325 1325 ce.obj = value # update cached copy
1326 1326 obj.__dict__[self.sname] = value # update copy returned by obj.x
1327 1327
1328 1328 def __delete__(self, obj):
1329 1329 try:
1330 1330 del obj.__dict__[self.sname]
1331 1331 except KeyError:
1332 1332 raise AttributeError(self.sname)
1333 1333
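# A minimal subclass sketch for the decorator above, assuming a
# hypothetical object that exposes a vfs as 'opener'; join() resolves the
# tracked paths, and a _filecache dict must exist on the decorated object:
#
#     class repofilecache(filecache):
#         def join(self, obj, fname):
#             return obj.opener.join(fname)
#
#     class fakerepo(object):
#         def __init__(self):
#             self._filecache = {}
#             self.opener = vfs.vfs('/some/path')
#
#         @repofilecache('bookmarks')
#         def bookmarks(self):
#             return 'parsed bookmarks data'
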
1334 1334 def extdatasource(repo, source):
1335 1335 """Gather a map of rev -> value dict from the specified source
1336 1336
1337 1337 A source spec is treated as a URL, with a special case shell: type
1338 1338 for parsing the output from a shell command.
1339 1339
1340 1340 The data is parsed as a series of newline-separated records where
1341 1341 each record is a revision specifier optionally followed by a space
1342 1342 and a freeform string value. If the revision is known locally, it
1343 1343 is converted to a rev, otherwise the record is skipped.
1344 1344
1345 1345 Note that both key and value are treated as UTF-8 and converted to
1346 1346 the local encoding. This allows uniformity between local and
1347 1347 remote data sources.
1348 1348 """
1349 1349
1350 1350 spec = repo.ui.config("extdata", source)
1351 1351 if not spec:
1352 1352 raise error.Abort(_("unknown extdata source '%s'") % source)
1353 1353
1354 1354 data = {}
1355 1355 src = proc = None
1356 1356 try:
1357 1357 if spec.startswith("shell:"):
1358 1358 # external commands should be run relative to the repo root
1359 1359 cmd = spec[6:]
1360 1360 proc = subprocess.Popen(procutil.tonativestr(cmd),
1361 1361 shell=True, bufsize=-1,
1362 1362 close_fds=procutil.closefds,
1363 1363 stdout=subprocess.PIPE,
1364 1364 cwd=procutil.tonativestr(repo.root))
1365 1365 src = proc.stdout
1366 1366 else:
1367 1367 # treat as a URL or file
1368 1368 src = url.open(repo.ui, spec)
1369 1369 for l in src:
1370 1370 if " " in l:
1371 1371 k, v = l.strip().split(" ", 1)
1372 1372 else:
1373 1373 k, v = l.strip(), ""
1374 1374
1375 1375 k = encoding.tolocal(k)
1376 1376 try:
1377 1377 data[revsingle(repo, k).rev()] = encoding.tolocal(v)
1378 1378 except (error.LookupError, error.RepoLookupError):
1379 1379 pass # we ignore data for nodes that don't exist locally
1380 1380 finally:
1381 1381 if proc:
1382 1382 proc.communicate()
1383 1383 if src:
1384 1384 src.close()
1385 1385 if proc and proc.returncode != 0:
1386 1386 raise error.Abort(_("extdata command '%s' failed: %s")
1387 1387 % (cmd, procutil.explainexit(proc.returncode)))
1388 1388
1389 1389 return data
1390 1390
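# An illustrative [extdata] configuration for the parsing above; the
# section name is real, but the source name and command are hypothetical.
# Each output line is a '<revspec> <value>' record:
#
#     [extdata]
#     bugzilla = shell:python fetchbugs.py
#
#     data = extdatasource(repo, 'bugzilla')  # {rev: value}
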
1391 1391 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1392 1392 if lock is None:
1393 1393 raise error.LockInheritanceContractViolation(
1394 1394 'lock can only be inherited while held')
1395 1395 if environ is None:
1396 1396 environ = {}
1397 1397 with lock.inherit() as locker:
1398 1398 environ[envvar] = locker
1399 1399 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1400 1400
1401 1401 def wlocksub(repo, cmd, *args, **kwargs):
1402 1402 """run cmd as a subprocess that allows inheriting repo's wlock
1403 1403
1404 1404 This can only be called while the wlock is held. This takes all the
1405 1405 arguments that ui.system does, and returns the exit code of the
1406 1406 subprocess."""
1407 1407 return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
1408 1408 **kwargs)
1409 1409
1410 1410 class progress(object):
1411 1411 def __init__(self, ui, topic, unit="", total=None):
1412 1412 self.ui = ui
1413 1413 self.pos = 0
1414 1414 self.topic = topic
1415 1415 self.unit = unit
1416 1416 self.total = total
1417 1417
1418 1418 def __enter__(self):
1419 1419 return self
1420 1420
1421 1421 def __exit__(self, exc_type, exc_value, exc_tb):
1422 1422 self.complete()
1423 1423
1424 1424 def update(self, pos, item="", total=None):
1425 1425 assert pos is not None
1426 1426 if total:
1427 1427 self.total = total
1428 1428 self.pos = pos
1429 1429 self._print(item)
1430 1430
1431 1431 def increment(self, step=1, item="", total=None):
1432 1432 self.update(self.pos + step, item, total)
1433 1433
1434 1434 def complete(self):
1435 1435 self.ui.progress(self.topic, None)
1436 1436
1437 1437 def _print(self, item):
1438 1438 self.ui.progress(self.topic, self.pos, item, self.unit,
1439 1439 self.total)
1440 1440
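# A usage sketch for the progress helper above; used as a context manager
# it calls complete() automatically on exit ('files' is a hypothetical
# sequence):
#
#     with progress(ui, 'files', unit='file', total=len(files)) as prog:
#         for f in files:
#             prog.increment(item=f)
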
1441 1441 def gdinitconfig(ui):
1442 1442 """helper function to know if a repo should be created as general delta
1443 1443 """
1444 1444 # experimental config: format.generaldelta
1445 1445 return (ui.configbool('format', 'generaldelta')
1446 1446 or ui.configbool('format', 'usegeneraldelta')
1447 1447 or ui.configbool('format', 'sparse-revlog'))
1448 1448
1449 1449 def gddeltaconfig(ui):
1450 1450 """helper function to know if incoming delta should be optimised
1451 1451 """
1452 1452 # experimental config: format.generaldelta
1453 1453 return ui.configbool('format', 'generaldelta')
1454 1454
1455 1455 class simplekeyvaluefile(object):
1456 1456 """A simple file with key=value lines
1457 1457
1458 1458 Keys must be alphanumeric and start with a letter; values must not
1459 1459 contain '\n' characters"""
1460 1460 firstlinekey = '__firstline'
1461 1461
1462 1462 def __init__(self, vfs, path, keys=None):
1463 1463 self.vfs = vfs
1464 1464 self.path = path
1465 1465
1466 1466 def read(self, firstlinenonkeyval=False):
1467 1467 """Read the contents of a simple key-value file
1468 1468
1469 1469 'firstlinenonkeyval' indicates whether the first line of the file should
1470 1470 be treated as a key-value pair or returned fully under the
1471 1471 __firstline key."""
1472 1472 lines = self.vfs.readlines(self.path)
1473 1473 d = {}
1474 1474 if firstlinenonkeyval:
1475 1475 if not lines:
1476 1476 e = _("empty simplekeyvalue file")
1477 1477 raise error.CorruptedState(e)
1478 1478 # we don't want to include '\n' in the __firstline
1479 1479 d[self.firstlinekey] = lines[0][:-1]
1480 1480 del lines[0]
1481 1481
1482 1482 try:
1483 1483 # the 'if line.strip()' part prevents us from failing on empty
1484 1484 # lines, which only contain '\n' and therefore are not skipped
1485 1485 # by 'if line'
1486 1486 updatedict = dict(line[:-1].split('=', 1) for line in lines
1487 1487 if line.strip())
1488 1488 if self.firstlinekey in updatedict:
1489 1489 e = _("%r can't be used as a key")
1490 1490 raise error.CorruptedState(e % self.firstlinekey)
1491 1491 d.update(updatedict)
1492 1492 except ValueError as e:
1493 1493 raise error.CorruptedState(str(e))
1494 1494 return d
1495 1495
1496 1496 def write(self, data, firstline=None):
1497 1497 """Write key=>value mapping to a file
1498 1498 data is a dict. Keys must be alphanumeric and start with a letter.
1499 1499 Values must not contain newline characters.
1500 1500
1501 1501 If 'firstline' is not None, it is written to file before
1502 1502 everything else, as it is, not in a key=value form"""
1503 1503 lines = []
1504 1504 if firstline is not None:
1505 1505 lines.append('%s\n' % firstline)
1506 1506
1507 1507 for k, v in data.items():
1508 1508 if k == self.firstlinekey:
1509 1509 e = "key name '%s' is reserved" % self.firstlinekey
1510 1510 raise error.ProgrammingError(e)
1511 1511 if not k[0:1].isalpha():
1512 1512 e = "keys must start with a letter in a key-value file"
1513 1513 raise error.ProgrammingError(e)
1514 1514 if not k.isalnum():
1515 1515 e = "invalid key name in a simple key-value file"
1516 1516 raise error.ProgrammingError(e)
1517 1517 if '\n' in v:
1518 1518 e = "invalid value in a simple key-value file"
1519 1519 raise error.ProgrammingError(e)
1520 1520 lines.append("%s=%s\n" % (k, v))
1521 1521 with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
1522 1522 fp.write(''.join(lines))
1523 1523
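# A round-trip sketch for simplekeyvaluefile, assuming a hypothetical state
# file under the repository's .hg directory:
#
#     kvfile = simplekeyvaluefile(repo.vfs, 'examplestate')
#     kvfile.write({'version': '1'}, firstline='header')
#     d = kvfile.read(firstlinenonkeyval=True)
#     # d == {'__firstline': 'header', 'version': '1'}
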
1524 1524 _reportobsoletedsource = [
1525 1525 'debugobsolete',
1526 1526 'pull',
1527 1527 'push',
1528 1528 'serve',
1529 1529 'unbundle',
1530 1530 ]
1531 1531
1532 1532 _reportnewcssource = [
1533 1533 'pull',
1534 1534 'unbundle',
1535 1535 ]
1536 1536
def prefetchfiles(repo, revs, match):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally, before the command
    uses them."""
    if match:
        # The command itself will complain about files that don't exist, so
        # don't duplicate the message.
        match = matchmod.badmatch(match, lambda fn, msg: None)
    else:
        match = matchall(repo)

    fileprefetchhooks(repo, revs, match)

# a list of (repo, revs, match) prefetch functions
fileprefetchhooks = util.hooks()
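
# Illustrative sketch, not part of the original module: an extension hooks
# into prefetching by adding a (repo, revs, match) callable to
# 'fileprefetchhooks' (util.hooks supports add()). The body below only walks
# the matched files; a real hook would fetch them from a remote store.
def _exampleprefetchhook(repo, revs, match):
    for rev in revs:
        for path in repo[rev].walk(match):
            pass # a real hook would ensure 'path' is available locally
#
# registration, typically from an extension's reposetup():
# fileprefetchhooks.add('myextension', _exampleprefetchhook)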

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True

def registersummarycallback(repo, otr, txnname=''):
    """register a callback to issue a summary after the transaction is closed
    """
    def txmatch(sources):
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used, leading to trouble. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())
        def wrapped(tr):
            repo = reporef()
            if filtername:
                repo = repo.filtered(filtername)
            func(repo, tr)
        newcat = '%02i-txnreport' % len(categories)
        otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    if txmatch(_reportobsoletedsource):
        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            if obsoleted:
                repo.ui.status(_('obsoleted %i changesets\n')
                               % len(obsoleted))

    if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
        repo.ui.configbool('experimental', 'evolution.report-instabilities')):
        instabilitytypes = [
            ('orphan', 'orphan'),
            ('phase-divergent', 'phasedivergent'),
            ('content-divergent', 'contentdivergent'),
        ]

        def getinstabilitycounts(repo):
            filtered = repo.changelog.filteredrevs
            counts = {}
            for instability, revset in instabilitytypes:
                counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
                                          filtered)
            return counts

        oldinstabilitycounts = getinstabilitycounts(repo)
        @reportsummary
        def reportnewinstabilities(repo, tr):
            newinstabilitycounts = getinstabilitycounts(repo)
            for instability, revset in instabilitytypes:
                delta = (newinstabilitycounts[instability] -
                         oldinstabilitycounts[instability])
                msg = getinstabilitymessage(delta, instability)
                if msg:
                    repo.ui.warn(msg)

    if txmatch(_reportnewcssource):
        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get('origrepolen', len(repo))
            unfi = repo.unfiltered()
            if origrepolen >= len(unfi):
                return

            # Compute the bounds of new visible revisions' range.
            revs = smartset.spanset(repo, start=origrepolen)
            if revs:
                minrev, maxrev = repo[revs.min()], repo[revs.max()]

                if minrev == maxrev:
                    revrange = minrev
                else:
                    revrange = '%s:%s' % (minrev, maxrev)
                draft = len(repo.revs('%ld and draft()', revs))
                secret = len(repo.revs('%ld and secret()', revs))
                if not (draft or secret):
                    msg = _('new changesets %s\n') % revrange
                elif draft and secret:
                    msg = _('new changesets %s (%d drafts, %d secrets)\n')
                    msg %= (revrange, draft, secret)
                elif draft:
                    msg = _('new changesets %s (%d drafts)\n')
                    msg %= (revrange, draft)
                elif secret:
                    msg = _('new changesets %s (%d secrets)\n')
                    msg %= (revrange, secret)
                else:
                    errormsg = 'entered unreachable condition'
                    raise error.ProgrammingError(errormsg)
                repo.ui.status(msg)

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            pull/unbundle.
            """
            origrepolen = tr.changes.get('origrepolen', len(repo))
            phasetracking = tr.changes.get('phases', {})
            if not phasetracking:
                return
            published = [
                rev for rev, (old, new) in phasetracking.iteritems()
                if new == phases.public and rev < origrepolen
            ]
            if not published:
                return
            repo.ui.status(_('%d local changesets published\n')
                           % len(published))

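# Illustrative sketch, not part of the original module: how a caller might
# wire the summary callbacks around a transaction. The txnname 'pull' makes
# the new-changesets and phase-change reports above fire when the
# transaction closes.
def _examplesummarizedtransaction(repo):
    with repo.wlock(), repo.lock():
        tr = repo.transaction('pull')
        try:
            registersummarycallback(repo, tr, txnname='pull')
            # ... pull-like work happens here ...
            tr.close()
        finally:
            tr.release()
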
def getinstabilitymessage(delta, instability):
    """Return the message warning about new instabilities.

    Exists as a separate function so that extensions can wrap it to show
    more information, like how to fix instabilities."""
    if delta > 0:
        return _('%i new %s changesets\n') % (delta, instability)

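# Illustrative sketch, not part of the original module: callers emit a
# warning only when getinstabilitymessage returns something; a non-positive
# delta yields None, meaning there is nothing to report.
def _examplewarnneworphans(repo, delta):
    msg = getinstabilitymessage(delta, 'orphan')
    if msg: # e.g. '2 new orphan changesets\n' when delta == 2
        repo.ui.warn(msg)
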
def nodesummaries(repo, nodes, maxnumnodes=4):
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return ' '.join(short(h) for h in nodes)
    first = ' '.join(short(h) for h in nodes[:maxnumnodes])
    return _("%s and %d others") % (first, len(nodes) - maxnumnodes)

def enforcesinglehead(repo, tr, desc):
    """check that no named branch has multiple heads"""
    if desc in ('strip', 'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered('visible')
    # possible improvement: we could restrict the check to the affected
    # branches only
    for name, heads in visible.branchmap().iteritems():
        if len(heads) > 1:
            msg = _('rejecting multiple heads on branch "%s"')
            msg %= name
            hint = _('%d heads: %s')
            hint %= (len(heads), nodesummaries(repo, heads))
            raise error.Abort(msg, hint=hint)

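# Illustrative sketch, not part of the original module: invoking the
# single-head check by hand; on failure the Abort hint is built with
# nodesummaries() above. Real callers register this against a transaction
# (that wiring lives outside this file).
def _examplesingleheadcheck(repo, tr):
    try:
        enforcesinglehead(repo, tr, 'commit')
    except error.Abort:
        repo.ui.warn(_('refusing multiple heads on a named branch\n'))
        raise
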
def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.
    """
    return sink

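# Illustrative sketch, not part of the original module: an extension can
# decorate every convert sink by wrapping this function with
# extensions.wrapfunction (a real Mercurial extension API); '_mysink' is
# hypothetical.
def _examplewrapconvertsink(orig, sink):
    sink = orig(sink)
    # ... wrap or replace 'sink' here, e.g. return _mysink(sink) ...
    return sink
#
# registration, from an extension's uisetup():
# extensions.wrapfunction(scmutil, 'wrapconvertsink', _examplewrapconvertsink)
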
def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    if not repo.filtername or not repo.ui.configbool('experimental',
                                                     'directaccess'):
        return repo

    if repo.filtername not in ('visible', 'visible-hidden'):
        return repo

    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError: # will be reported by scmutil.revrange()
            continue

        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)

    if not revs:
        return repo

    if hiddentype == 'warn':
        unfi = repo.unfiltered()
        revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
        repo.ui.warn(_("warning: accessing hidden changesets for write "
                       "operation: %s\n") % revstr)
    # we have to use a new filtername to separate branch/tags caches until we
    # can disable these caches when revisions are dynamically pinned.
    return repo.filtered('visible-hidden', revs)

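# Illustrative sketch, not part of the original module: a command taking
# user revision specs can pre-process them so that hidden hashes resolve
# when experimental.directaccess is enabled; revrange() is the scmutil
# helper mentioned above.
def _exampledirectaccess(ui, repo, specs):
    repo = unhidehashlikerevs(repo, specs, 'warn')
    for rev in revrange(repo, specs):
        ui.write('%d\n' % rev)
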
def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and return the set of revision numbers of
    hidden changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
    for s in symbols:
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    if n not in cl:
                        revs.add(n)
                    continue
        except ValueError:
            pass

        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs

def bookmarkrevs(repo, mark):
    """
    Select revisions reachable by a given bookmark
    """
    return repo.revs("ancestors(bookmark(%s)) - "
                     "ancestors(head() and not bookmark(%s)) - "
                     "ancestors(bookmark() and not bookmark(%s))",
                     mark, mark, mark)
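
# Illustrative sketch, not part of the original module: listing the
# changesets that belong to a bookmark's line of development, e.g. for a
# hypothetical 'showbookmark' command.
def _exampleshowbookmark(ui, repo, mark):
    for rev in bookmarkrevs(repo, mark):
        ui.write('%s\n' % repo[rev])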