revert: extract origvfs logic in a sub-function...
Boris Feld
r40783:65591a51 default
@@ -1,1802 +1,1810 @@
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import hashlib
13 13 import os
14 14 import re
15 15 import socket
16 16 import subprocess
17 17 import weakref
18 18
19 19 from .i18n import _
20 20 from .node import (
21 21 bin,
22 22 hex,
23 23 nullid,
24 24 nullrev,
25 25 short,
26 26 wdirid,
27 27 wdirrev,
28 28 )
29 29
30 30 from . import (
31 31 encoding,
32 32 error,
33 33 match as matchmod,
34 34 obsolete,
35 35 obsutil,
36 36 pathutil,
37 37 phases,
38 38 policy,
39 39 pycompat,
40 40 revsetlang,
41 41 similar,
42 42 smartset,
43 43 url,
44 44 util,
45 45 vfs,
46 46 )
47 47
48 48 from .utils import (
49 49 procutil,
50 50 stringutil,
51 51 )
52 52
53 53 if pycompat.iswindows:
54 54 from . import scmwindows as scmplatform
55 55 else:
56 56 from . import scmposix as scmplatform
57 57
58 58 parsers = policy.importmod(r'parsers')
59 59
60 60 termsize = scmplatform.termsize
61 61
62 62 class status(tuple):
63 63 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
64 64 and 'ignored' properties are only relevant to the working copy.
65 65 '''
66 66
67 67 __slots__ = ()
68 68
69 69 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
70 70 clean):
71 71 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
72 72 ignored, clean))
73 73
74 74 @property
75 75 def modified(self):
76 76 '''files that have been modified'''
77 77 return self[0]
78 78
79 79 @property
80 80 def added(self):
81 81 '''files that have been added'''
82 82 return self[1]
83 83
84 84 @property
85 85 def removed(self):
86 86 '''files that have been removed'''
87 87 return self[2]
88 88
89 89 @property
90 90 def deleted(self):
91 91 '''files that are in the dirstate, but have been deleted from the
92 92 working copy (aka "missing")
93 93 '''
94 94 return self[3]
95 95
96 96 @property
97 97 def unknown(self):
98 98 '''files not in the dirstate that are not ignored'''
99 99 return self[4]
100 100
101 101 @property
102 102 def ignored(self):
103 103 '''files not in the dirstate that are ignored (by _dirignore())'''
104 104 return self[5]
105 105
106 106 @property
107 107 def clean(self):
108 108 '''files that have not been modified'''
109 109 return self[6]
110 110
111 111 def __repr__(self, *args, **kwargs):
112 112 return ((r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
113 113 r'unknown=%s, ignored=%s, clean=%s>') %
114 114 tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self))
115 115
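A minimal sketch of the status tuple's API (constructing one directly here for
illustration; in practice these objects come back from repo.status()):

    from mercurial import scmutil

    st = scmutil.status(['changed.txt'], [], [], [], [], [], ['clean.txt'])
    assert st.modified == ['changed.txt']   # same object as st[0]
    assert st.clean == ['clean.txt']        # positional access also works: st[6]
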
116 116 def itersubrepos(ctx1, ctx2):
117 117 """find subrepos in ctx1 or ctx2"""
118 118 # Create a (subpath, ctx) mapping where we prefer subpaths from
119 119 # ctx1. The subpaths from ctx2 are important when the .hgsub file
120 120 # has been modified (in ctx2) but not yet committed (in ctx1).
121 121 subpaths = dict.fromkeys(ctx2.substate, ctx2)
122 122 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
123 123
124 124 missing = set()
125 125
126 126 for subpath in ctx2.substate:
127 127 if subpath not in ctx1.substate:
128 128 del subpaths[subpath]
129 129 missing.add(subpath)
130 130
131 131 for subpath, ctx in sorted(subpaths.iteritems()):
132 132 yield subpath, ctx.sub(subpath)
133 133
134 134 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
135 135 # status and diff will have an accurate result when it does
136 136 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
137 137 # against itself.
138 138 for subpath in missing:
139 139 yield subpath, ctx2.nullsub(subpath, ctx1)
140 140
141 141 def nochangesfound(ui, repo, excluded=None):
142 142 '''Report no changes for push/pull, excluded is None or a list of
143 143 nodes excluded from the push/pull.
144 144 '''
145 145 secretlist = []
146 146 if excluded:
147 147 for n in excluded:
148 148 ctx = repo[n]
149 149 if ctx.phase() >= phases.secret and not ctx.extinct():
150 150 secretlist.append(n)
151 151
152 152 if secretlist:
153 153 ui.status(_("no changes found (ignored %d secret changesets)\n")
154 154 % len(secretlist))
155 155 else:
156 156 ui.status(_("no changes found\n"))
157 157
158 158 def callcatch(ui, func):
159 159 """call func() with global exception handling
160 160
161 161 return func() if no exception happens. otherwise do some error handling
162 162 and return an exit code accordingly. does not handle all exceptions.
163 163 """
164 164 try:
165 165 try:
166 166 return func()
167 167 except: # re-raises
168 168 ui.traceback()
169 169 raise
170 170 # Global exception handling, alphabetically
171 171 # Mercurial-specific first, followed by built-in and library exceptions
172 172 except error.LockHeld as inst:
173 173 if inst.errno == errno.ETIMEDOUT:
174 174 reason = _('timed out waiting for lock held by %r') % (
175 175 pycompat.bytestr(inst.locker))
176 176 else:
177 177 reason = _('lock held by %r') % inst.locker
178 178 ui.error(_("abort: %s: %s\n") % (
179 179 inst.desc or stringutil.forcebytestr(inst.filename), reason))
180 180 if not inst.locker:
181 181 ui.error(_("(lock might be very busy)\n"))
182 182 except error.LockUnavailable as inst:
183 183 ui.error(_("abort: could not lock %s: %s\n") %
184 184 (inst.desc or stringutil.forcebytestr(inst.filename),
185 185 encoding.strtolocal(inst.strerror)))
186 186 except error.OutOfBandError as inst:
187 187 if inst.args:
188 188 msg = _("abort: remote error:\n")
189 189 else:
190 190 msg = _("abort: remote error\n")
191 191 ui.error(msg)
192 192 if inst.args:
193 193 ui.error(''.join(inst.args))
194 194 if inst.hint:
195 195 ui.error('(%s)\n' % inst.hint)
196 196 except error.RepoError as inst:
197 197 ui.error(_("abort: %s!\n") % inst)
198 198 if inst.hint:
199 199 ui.error(_("(%s)\n") % inst.hint)
200 200 except error.ResponseError as inst:
201 201 ui.error(_("abort: %s") % inst.args[0])
202 202 msg = inst.args[1]
203 203 if isinstance(msg, type(u'')):
204 204 msg = pycompat.sysbytes(msg)
205 205 if not isinstance(msg, bytes):
206 206 ui.error(" %r\n" % (msg,))
207 207 elif not msg:
208 208 ui.error(_(" empty string\n"))
209 209 else:
210 210 ui.error("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
211 211 except error.CensoredNodeError as inst:
212 212 ui.error(_("abort: file censored %s!\n") % inst)
213 213 except error.StorageError as inst:
214 214 ui.error(_("abort: %s!\n") % inst)
215 215 if inst.hint:
216 216 ui.error(_("(%s)\n") % inst.hint)
217 217 except error.InterventionRequired as inst:
218 218 ui.error("%s\n" % inst)
219 219 if inst.hint:
220 220 ui.error(_("(%s)\n") % inst.hint)
221 221 return 1
222 222 except error.WdirUnsupported:
223 223 ui.error(_("abort: working directory revision cannot be specified\n"))
224 224 except error.Abort as inst:
225 225 ui.error(_("abort: %s\n") % inst)
226 226 if inst.hint:
227 227 ui.error(_("(%s)\n") % inst.hint)
228 228 except ImportError as inst:
229 229 ui.error(_("abort: %s!\n") % stringutil.forcebytestr(inst))
230 230 m = stringutil.forcebytestr(inst).split()[-1]
231 231 if m in "mpatch bdiff".split():
232 232 ui.error(_("(did you forget to compile extensions?)\n"))
233 233 elif m in "zlib".split():
234 234 ui.error(_("(is your Python install correct?)\n"))
235 235 except IOError as inst:
236 236 if util.safehasattr(inst, "code"):
237 237 ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst))
238 238 elif util.safehasattr(inst, "reason"):
239 239 try: # usually it is in the form (errno, strerror)
240 240 reason = inst.reason.args[1]
241 241 except (AttributeError, IndexError):
242 242 # it might be anything, for example a string
243 243 reason = inst.reason
244 244 if isinstance(reason, pycompat.unicode):
245 245 # SSLError of Python 2.7.9 contains a unicode
246 246 reason = encoding.unitolocal(reason)
247 247 ui.error(_("abort: error: %s\n") % reason)
248 248 elif (util.safehasattr(inst, "args")
249 249 and inst.args and inst.args[0] == errno.EPIPE):
250 250 pass
251 251 elif getattr(inst, "strerror", None):
252 252 if getattr(inst, "filename", None):
253 253 ui.error(_("abort: %s: %s\n") % (
254 254 encoding.strtolocal(inst.strerror),
255 255 stringutil.forcebytestr(inst.filename)))
256 256 else:
257 257 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
258 258 else:
259 259 raise
260 260 except OSError as inst:
261 261 if getattr(inst, "filename", None) is not None:
262 262 ui.error(_("abort: %s: '%s'\n") % (
263 263 encoding.strtolocal(inst.strerror),
264 264 stringutil.forcebytestr(inst.filename)))
265 265 else:
266 266 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
267 267 except MemoryError:
268 268 ui.error(_("abort: out of memory\n"))
269 269 except SystemExit as inst:
270 270 # Commands shouldn't sys.exit directly, but give a return code.
271 271 # Just in case catch this and pass exit code to caller.
272 272 return inst.code
273 273 except socket.error as inst:
274 274 ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))
275 275
276 276 return -1
277 277
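A sketch of callcatch() in use (the 'unreliable' function and its message are
illustrative only):

    from mercurial import error, scmutil, ui as uimod

    ui = uimod.ui.load()

    def unreliable():
        raise error.Abort('demo failure', hint='only an illustration')

    # prints "abort: demo failure" and "(only an illustration)", returns -1
    ret = scmutil.callcatch(ui, unreliable)
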
278 278 def checknewlabel(repo, lbl, kind):
279 279 # Do not use the "kind" parameter in ui output.
280 280 # It makes strings difficult to translate.
281 281 if lbl in ['tip', '.', 'null']:
282 282 raise error.Abort(_("the name '%s' is reserved") % lbl)
283 283 for c in (':', '\0', '\n', '\r'):
284 284 if c in lbl:
285 285 raise error.Abort(
286 286 _("%r cannot be used in a name") % pycompat.bytestr(c))
287 287 try:
288 288 int(lbl)
289 289 raise error.Abort(_("cannot use an integer as a name"))
290 290 except ValueError:
291 291 pass
292 292 if lbl.strip() != lbl:
293 293 raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
294 294
295 295 def checkfilename(f):
296 296 '''Check that the filename f is an acceptable filename for a tracked file'''
297 297 if '\r' in f or '\n' in f:
298 298 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
299 299 % pycompat.bytestr(f))
300 300
301 301 def checkportable(ui, f):
302 302 '''Check if filename f is portable and warn or abort depending on config'''
303 303 checkfilename(f)
304 304 abort, warn = checkportabilityalert(ui)
305 305 if abort or warn:
306 306 msg = util.checkwinfilename(f)
307 307 if msg:
308 308 msg = "%s: %s" % (msg, procutil.shellquote(f))
309 309 if abort:
310 310 raise error.Abort(msg)
311 311 ui.warn(_("warning: %s\n") % msg)
312 312
313 313 def checkportabilityalert(ui):
314 314 '''check if the user's config requests nothing, a warning, or abort for
315 315 non-portable filenames'''
316 316 val = ui.config('ui', 'portablefilenames')
317 317 lval = val.lower()
318 318 bval = stringutil.parsebool(val)
319 319 abort = pycompat.iswindows or lval == 'abort'
320 320 warn = bval or lval == 'warn'
321 321 if bval is None and not (warn or abort or lval == 'ignore'):
322 322 raise error.ConfigError(
323 323 _("ui.portablefilenames value is invalid ('%s')") % val)
324 324 return abort, warn
325 325
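A sketch of the portability check driven by that config knob ('aux' is one of
the Windows-reserved base names flagged by util.checkwinfilename()):

    from mercurial import error, scmutil, ui as uimod

    ui = uimod.ui.load()
    ui.setconfig('ui', 'portablefilenames', 'abort')
    try:
        scmutil.checkportable(ui, 'aux.txt')
    except error.Abort as inst:
        print('rejected: %s' % inst)
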
326 326 class casecollisionauditor(object):
327 327 def __init__(self, ui, abort, dirstate):
328 328 self._ui = ui
329 329 self._abort = abort
330 330 allfiles = '\0'.join(dirstate._map)
331 331 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
332 332 self._dirstate = dirstate
333 333 # The purpose of _newfiles is so that we don't complain about
334 334 # case collisions if someone were to call this object with the
335 335 # same filename twice.
336 336 self._newfiles = set()
337 337
338 338 def __call__(self, f):
339 339 if f in self._newfiles:
340 340 return
341 341 fl = encoding.lower(f)
342 342 if fl in self._loweredfiles and f not in self._dirstate:
343 343 msg = _('possible case-folding collision for %s') % f
344 344 if self._abort:
345 345 raise error.Abort(msg)
346 346 self._ui.warn(_("warning: %s\n") % msg)
347 347 self._loweredfiles.add(fl)
348 348 self._newfiles.add(f)
349 349
350 350 def filteredhash(repo, maxrev):
351 351 """build hash of filtered revisions in the current repoview.
352 352
353 353 Multiple caches perform up-to-date validation by checking that the
354 354 tiprev and tipnode stored in the cache file match the current repository.
355 355 However, this is not sufficient for validating repoviews because the set
356 356 of revisions in the view may change without the repository tiprev and
357 357 tipnode changing.
358 358
359 359 This function hashes all the revs filtered from the view and returns
360 360 that SHA-1 digest.
361 361 """
362 362 cl = repo.changelog
363 363 if not cl.filteredrevs:
364 364 return None
365 365 key = None
366 366 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
367 367 if revs:
368 368 s = hashlib.sha1()
369 369 for rev in revs:
370 370 s.update('%d;' % rev)
371 371 key = s.digest()
372 372 return key
373 373
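The key derivation above in isolation, as a standalone sketch (the revision
numbers are made up):

    import hashlib

    filtered = [3, 5, 8]                # example filtered revision numbers
    s = hashlib.sha1()
    for rev in sorted(filtered):
        s.update(b'%d;' % rev)          # same '%d;' framing as filteredhash()
    key = s.digest()                    # 20-byte cache validation key
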
374 374 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
375 375 '''yield every hg repository under path, always recursively.
376 376 The recurse flag will only control recursion into repo working dirs'''
377 377 def errhandler(err):
378 378 if err.filename == path:
379 379 raise err
380 380 samestat = getattr(os.path, 'samestat', None)
381 381 if followsym and samestat is not None:
382 382 def adddir(dirlst, dirname):
383 383 dirstat = os.stat(dirname)
384 384 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
385 385 if not match:
386 386 dirlst.append(dirstat)
387 387 return not match
388 388 else:
389 389 followsym = False
390 390
391 391 if (seen_dirs is None) and followsym:
392 392 seen_dirs = []
393 393 adddir(seen_dirs, path)
394 394 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
395 395 dirs.sort()
396 396 if '.hg' in dirs:
397 397 yield root # found a repository
398 398 qroot = os.path.join(root, '.hg', 'patches')
399 399 if os.path.isdir(os.path.join(qroot, '.hg')):
400 400 yield qroot # we have a patch queue repo here
401 401 if recurse:
402 402 # avoid recursing inside the .hg directory
403 403 dirs.remove('.hg')
404 404 else:
405 405 dirs[:] = [] # don't descend further
406 406 elif followsym:
407 407 newdirs = []
408 408 for d in dirs:
409 409 fname = os.path.join(root, d)
410 410 if adddir(seen_dirs, fname):
411 411 if os.path.islink(fname):
412 412 for hgname in walkrepos(fname, True, seen_dirs):
413 413 yield hgname
414 414 else:
415 415 newdirs.append(d)
416 416 dirs[:] = newdirs
417 417
418 418 def binnode(ctx):
419 419 """Return binary node id for a given basectx"""
420 420 node = ctx.node()
421 421 if node is None:
422 422 return wdirid
423 423 return node
424 424
425 425 def intrev(ctx):
426 426 """Return integer for a given basectx that can be used in comparison or
427 427 arithmetic operation"""
428 428 rev = ctx.rev()
429 429 if rev is None:
430 430 return wdirrev
431 431 return rev
432 432
433 433 def formatchangeid(ctx):
434 434 """Format changectx as '{rev}:{node|formatnode}', which is the default
435 435 template provided by logcmdutil.changesettemplater"""
436 436 repo = ctx.repo()
437 437 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
438 438
439 439 def formatrevnode(ui, rev, node):
440 440 """Format given revision and node depending on the current verbosity"""
441 441 if ui.debugflag:
442 442 hexfunc = hex
443 443 else:
444 444 hexfunc = short
445 445 return '%d:%s' % (rev, hexfunc(node))
446 446
447 447 def resolvehexnodeidprefix(repo, prefix):
448 448 if (prefix.startswith('x') and
449 449 repo.ui.configbool('experimental', 'revisions.prefixhexnode')):
450 450 prefix = prefix[1:]
451 451 try:
452 452 # Uses unfiltered repo because it's faster when prefix is ambiguous.
453 453 # This matches the shortesthexnodeidprefix() function below.
454 454 node = repo.unfiltered().changelog._partialmatch(prefix)
455 455 except error.AmbiguousPrefixLookupError:
456 456 revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
457 457 if revset:
458 458 # Clear config to avoid infinite recursion
459 459 configoverrides = {('experimental',
460 460 'revisions.disambiguatewithin'): None}
461 461 with repo.ui.configoverride(configoverrides):
462 462 revs = repo.anyrevs([revset], user=True)
463 463 matches = []
464 464 for rev in revs:
465 465 node = repo.changelog.node(rev)
466 466 if hex(node).startswith(prefix):
467 467 matches.append(node)
468 468 if len(matches) == 1:
469 469 return matches[0]
470 470 raise
471 471 if node is None:
472 472 return
473 473 repo.changelog.rev(node) # make sure node isn't filtered
474 474 return node
475 475
476 476 def mayberevnum(repo, prefix):
477 477 """Checks if the given prefix may be mistaken for a revision number"""
478 478 try:
479 479 i = int(prefix)
480 480 # if we are a pure int, then starting with zero will not be
481 481 # confused as a rev; or, obviously, if the int is larger
482 482 # than the value of the tip rev. We still need to disambiguate if
483 483 # prefix == '0', since that *is* a valid revnum.
484 484 if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
485 485 return False
486 486 return True
487 487 except ValueError:
488 488 return False
489 489
490 490 def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
491 491 """Find the shortest unambiguous prefix that matches hexnode.
492 492
493 493 If "cache" is not None, it must be a dictionary that can be used for
494 494 caching between calls to this method.
495 495 """
496 496 # _partialmatch() of filtered changelog could take O(len(repo)) time,
497 497 # which would be unacceptably slow. so we look for hash collision in
498 498 # unfiltered space, which means some hashes may be slightly longer.
499 499
500 500 minlength = max(minlength, 1)
501 501
502 502 def disambiguate(prefix):
503 503 """Disambiguate against revnums."""
504 504 if repo.ui.configbool('experimental', 'revisions.prefixhexnode'):
505 505 if mayberevnum(repo, prefix):
506 506 return 'x' + prefix
507 507 else:
508 508 return prefix
509 509
510 510 hexnode = hex(node)
511 511 for length in range(len(prefix), len(hexnode) + 1):
512 512 prefix = hexnode[:length]
513 513 if not mayberevnum(repo, prefix):
514 514 return prefix
515 515
516 516 cl = repo.unfiltered().changelog
517 517 revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
518 518 if revset:
519 519 revs = None
520 520 if cache is not None:
521 521 revs = cache.get('disambiguationrevset')
522 522 if revs is None:
523 523 revs = repo.anyrevs([revset], user=True)
524 524 if cache is not None:
525 525 cache['disambiguationrevset'] = revs
526 526 if cl.rev(node) in revs:
527 527 hexnode = hex(node)
528 528 nodetree = None
529 529 if cache is not None:
530 530 nodetree = cache.get('disambiguationnodetree')
531 531 if not nodetree:
532 532 try:
533 533 nodetree = parsers.nodetree(cl.index, len(revs))
534 534 except AttributeError:
535 535 # no native nodetree
536 536 pass
537 537 else:
538 538 for r in revs:
539 539 nodetree.insert(r)
540 540 if cache is not None:
541 541 cache['disambiguationnodetree'] = nodetree
542 542 if nodetree is not None:
543 543 length = max(nodetree.shortest(node), minlength)
544 544 prefix = hexnode[:length]
545 545 return disambiguate(prefix)
546 546 for length in range(minlength, len(hexnode) + 1):
547 547 matches = []
548 548 prefix = hexnode[:length]
549 549 for rev in revs:
550 550 otherhexnode = repo[rev].hex()
551 551 if prefix == otherhexnode[:length]:
552 552 matches.append(otherhexnode)
553 553 if len(matches) == 1:
554 554 return disambiguate(prefix)
555 555
556 556 try:
557 557 return disambiguate(cl.shortest(node, minlength))
558 558 except error.LookupError:
559 559 raise error.RepoLookupError()
560 560
561 561 def isrevsymbol(repo, symbol):
562 562 """Checks if a symbol exists in the repo.
563 563
564 564 See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
565 565 symbol is an ambiguous nodeid prefix.
566 566 """
567 567 try:
568 568 revsymbol(repo, symbol)
569 569 return True
570 570 except error.RepoLookupError:
571 571 return False
572 572
573 573 def revsymbol(repo, symbol):
574 574 """Returns a context given a single revision symbol (as string).
575 575
576 576 This is similar to revsingle(), but accepts only a single revision symbol,
577 577 i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
578 578 not "max(public())".
579 579 """
580 580 if not isinstance(symbol, bytes):
581 581 msg = ("symbol (%s of type %s) was not a string, did you mean "
582 582 "repo[symbol]?" % (symbol, type(symbol)))
583 583 raise error.ProgrammingError(msg)
584 584 try:
585 585 if symbol in ('.', 'tip', 'null'):
586 586 return repo[symbol]
587 587
588 588 try:
589 589 r = int(symbol)
590 590 if '%d' % r != symbol:
591 591 raise ValueError
592 592 l = len(repo.changelog)
593 593 if r < 0:
594 594 r += l
595 595 if r < 0 or r >= l and r != wdirrev:
596 596 raise ValueError
597 597 return repo[r]
598 598 except error.FilteredIndexError:
599 599 raise
600 600 except (ValueError, OverflowError, IndexError):
601 601 pass
602 602
603 603 if len(symbol) == 40:
604 604 try:
605 605 node = bin(symbol)
606 606 rev = repo.changelog.rev(node)
607 607 return repo[rev]
608 608 except error.FilteredLookupError:
609 609 raise
610 610 except (TypeError, LookupError):
611 611 pass
612 612
613 613 # look up bookmarks through the name interface
614 614 try:
615 615 node = repo.names.singlenode(repo, symbol)
616 616 rev = repo.changelog.rev(node)
617 617 return repo[rev]
618 618 except KeyError:
619 619 pass
620 620
621 621 node = resolvehexnodeidprefix(repo, symbol)
622 622 if node is not None:
623 623 rev = repo.changelog.rev(node)
624 624 return repo[rev]
625 625
626 626 raise error.RepoLookupError(_("unknown revision '%s'") % symbol)
627 627
628 628 except error.WdirUnsupported:
629 629 return repo[None]
630 630 except (error.FilteredIndexError, error.FilteredLookupError,
631 631 error.FilteredRepoLookupError):
632 632 raise _filterederror(repo, symbol)
633 633
634 634 def _filterederror(repo, changeid):
635 635 """build an exception to be raised about a filtered changeid
636 636
637 637 This is extracted in a function to help extensions (eg: evolve) to
638 638 experiment with various message variants."""
639 639 if repo.filtername.startswith('visible'):
640 640
641 641 # Check if the changeset is obsolete
642 642 unfilteredrepo = repo.unfiltered()
643 643 ctx = revsymbol(unfilteredrepo, changeid)
644 644
645 645 # If the changeset is obsolete, enrich the message with the reason
646 646 # that made this changeset not visible
647 647 if ctx.obsolete():
648 648 msg = obsutil._getfilteredreason(repo, changeid, ctx)
649 649 else:
650 650 msg = _("hidden revision '%s'") % changeid
651 651
652 652 hint = _('use --hidden to access hidden revisions')
653 653
654 654 return error.FilteredRepoLookupError(msg, hint=hint)
655 655 msg = _("filtered revision '%s' (not in '%s' subset)")
656 656 msg %= (changeid, repo.filtername)
657 657 return error.FilteredRepoLookupError(msg)
658 658
659 659 def revsingle(repo, revspec, default='.', localalias=None):
660 660 if not revspec and revspec != 0:
661 661 return repo[default]
662 662
663 663 l = revrange(repo, [revspec], localalias=localalias)
664 664 if not l:
665 665 raise error.Abort(_('empty revision set'))
666 666 return repo[l.last()]
667 667
668 668 def _pairspec(revspec):
669 669 tree = revsetlang.parse(revspec)
670 670 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
671 671
672 672 def revpair(repo, revs):
673 673 if not revs:
674 674 return repo['.'], repo[None]
675 675
676 676 l = revrange(repo, revs)
677 677
678 678 if not l:
679 679 first = second = None
680 680 elif l.isascending():
681 681 first = l.min()
682 682 second = l.max()
683 683 elif l.isdescending():
684 684 first = l.max()
685 685 second = l.min()
686 686 else:
687 687 first = l.first()
688 688 second = l.last()
689 689
690 690 if first is None:
691 691 raise error.Abort(_('empty revision range'))
692 692 if (first == second and len(revs) >= 2
693 693 and not all(revrange(repo, [r]) for r in revs)):
694 694 raise error.Abort(_('empty revision on one side of range'))
695 695
696 696 # if top-level is range expression, the result must always be a pair
697 697 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
698 698 return repo[first], repo[None]
699 699
700 700 return repo[first], repo[second]
701 701
702 702 def revrange(repo, specs, localalias=None):
703 703 """Execute 1 to many revsets and return the union.
704 704
705 705 This is the preferred mechanism for executing revsets using user-specified
706 706 config options, such as revset aliases.
707 707
708 708 The revsets specified by ``specs`` will be executed via a chained ``OR``
709 709 expression. If ``specs`` is empty, an empty result is returned.
710 710
711 711 ``specs`` can contain integers, in which case they are assumed to be
712 712 revision numbers.
713 713
714 714 It is assumed the revsets are already formatted. If you have arguments
715 715 that need to be expanded in the revset, call ``revsetlang.formatspec()``
716 716 and pass the result as an element of ``specs``.
717 717
718 718 Specifying a single revset is allowed.
719 719
720 720 Returns a ``revset.abstractsmartset`` which is a list-like interface over
721 721 integer revisions.
722 722 """
723 723 allspecs = []
724 724 for spec in specs:
725 725 if isinstance(spec, int):
726 726 spec = revsetlang.formatspec('rev(%d)', spec)
727 727 allspecs.append(spec)
728 728 return repo.anyrevs(allspecs, user=True, localalias=localalias)
729 729
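A usage sketch for the two helpers (assuming the current directory is a
repository with at least a few revisions):

    from mercurial import hg, scmutil, ui as uimod

    ui = uimod.ui.load()
    repo = hg.repository(ui, '.')

    ctx = scmutil.revsingle(repo, 'tip')            # one changectx
    revs = scmutil.revrange(repo, ['0:2', 'tip'])   # union of both revsets
    print('%d in %r' % (ctx.rev(), list(revs)))
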
730 730 def meaningfulparents(repo, ctx):
731 731 """Return list of meaningful (or all if debug) parentrevs for rev.
732 732
733 733 For merges (two non-nullrev revisions) both parents are meaningful.
734 734 Otherwise the first parent revision is considered meaningful if it
735 735 is not the preceding revision.
736 736 """
737 737 parents = ctx.parents()
738 738 if len(parents) > 1:
739 739 return parents
740 740 if repo.ui.debugflag:
741 741 return [parents[0], repo[nullrev]]
742 742 if parents[0].rev() >= intrev(ctx) - 1:
743 743 return []
744 744 return parents
745 745
746 746 def expandpats(pats):
747 747 '''Expand bare globs when running on Windows.
748 748 On posix we assume it has already been done by sh.'''
749 749 if not util.expandglobs:
750 750 return list(pats)
751 751 ret = []
752 752 for kindpat in pats:
753 753 kind, pat = matchmod._patsplit(kindpat, None)
754 754 if kind is None:
755 755 try:
756 756 globbed = glob.glob(pat)
757 757 except re.error:
758 758 globbed = [pat]
759 759 if globbed:
760 760 ret.extend(globbed)
761 761 continue
762 762 ret.append(kindpat)
763 763 return ret
764 764
765 765 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
766 766 badfn=None):
767 767 '''Return a matcher and the patterns that were used.
768 768 The matcher will warn about bad matches, unless an alternate badfn callback
769 769 is provided.'''
770 770 if pats == ("",):
771 771 pats = []
772 772 if opts is None:
773 773 opts = {}
774 774 if not globbed and default == 'relpath':
775 775 pats = expandpats(pats or [])
776 776
777 777 def bad(f, msg):
778 778 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
779 779
780 780 if badfn is None:
781 781 badfn = bad
782 782
783 783 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
784 784 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
785 785
786 786 if m.always():
787 787 pats = []
788 788 return m, pats
789 789
790 790 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
791 791 badfn=None):
792 792 '''Return a matcher that will warn about bad matches.'''
793 793 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
794 794
795 795 def matchall(repo):
796 796 '''Return a matcher that will efficiently match everything.'''
797 797 return matchmod.always(repo.root, repo.getcwd())
798 798
799 799 def matchfiles(repo, files, badfn=None):
800 800 '''Return a matcher that will efficiently match exactly these files.'''
801 801 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
802 802
803 803 def parsefollowlinespattern(repo, rev, pat, msg):
804 804 """Return a file name from `pat` pattern suitable for usage in followlines
805 805 logic.
806 806 """
807 807 if not matchmod.patkind(pat):
808 808 return pathutil.canonpath(repo.root, repo.getcwd(), pat)
809 809 else:
810 810 ctx = repo[rev]
811 811 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
812 812 files = [f for f in ctx if m(f)]
813 813 if len(files) != 1:
814 814 raise error.ParseError(msg)
815 815 return files[0]
816 816
817 def getorigvfs(ui, repo):
818 """return a vfs suitable to save 'orig' file
819
820 return None if no special directory is configured"""
821 origbackuppath = ui.config('ui', 'origbackuppath')
822 if not origbackuppath:
823 return None
824 return vfs.vfs(repo.wvfs.join(origbackuppath))
825
817 826 def origpath(ui, repo, filepath):
818 827 '''customize where .orig files are created
819 828
820 829 Fetch user defined path from config file: [ui] origbackuppath = <path>
821 830 Fall back to default (filepath with .orig suffix) if not specified
822 831 '''
823 origbackuppath = ui.config('ui', 'origbackuppath')
824 if not origbackuppath:
832 origvfs = getorigvfs(ui, repo)
833 if origvfs is None:
825 834 return filepath + ".orig"
826 835
827 836 # Convert filepath from an absolute path into a path inside the repo.
828 837 filepathfromroot = util.normpath(os.path.relpath(filepath,
829 838 start=repo.root))
830 839
831 origvfs = vfs.vfs(repo.wjoin(origbackuppath))
832 840 origbackupdir = origvfs.dirname(filepathfromroot)
833 841 if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
834 842 ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))
835 843
836 844 # Remove any files that conflict with the backup file's path
837 845 for f in reversed(list(util.finddirs(filepathfromroot))):
838 846 if origvfs.isfileorlink(f):
839 847 ui.note(_('removing conflicting file: %s\n')
840 848 % origvfs.join(f))
841 849 origvfs.unlink(f)
842 850 break
843 851
844 852 origvfs.makedirs(origbackupdir)
845 853
846 854 if origvfs.isdir(filepathfromroot) and not origvfs.islink(filepathfromroot):
847 855 ui.note(_('removing conflicting directory: %s\n')
848 856 % origvfs.join(filepathfromroot))
849 857 origvfs.rmtree(filepathfromroot, forcibly=True)
850 858
851 859 return origvfs.join(filepathfromroot)
852 860
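The change in this revision: the ui.origbackuppath lookup moves out of
origpath() into the new getorigvfs() helper, so callers such as revert can
ask for the vfs directly. A usage sketch ('file.txt' is a placeholder; note
origpath() creates the backup directory as a side effect):

    from mercurial import hg, scmutil, ui as uimod

    ui = uimod.ui.load()
    repo = hg.repository(ui, '.')

    # with the option unset, getorigvfs() is None and origpath()
    # falls back to appending '.orig' in place
    assert scmutil.getorigvfs(ui, repo) is None
    print(scmutil.origpath(ui, repo, repo.wjoin('file.txt')))

    # with a backup directory configured, backups are rooted there instead
    ui.setconfig('ui', 'origbackuppath', '.hg/origbackups')
    print(scmutil.origpath(ui, repo, repo.wjoin('file.txt')))
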
853 861 class _containsnode(object):
854 862 """proxy __contains__(node) to container.__contains__ which accepts revs"""
855 863
856 864 def __init__(self, repo, revcontainer):
857 865 self._torev = repo.changelog.rev
858 866 self._revcontains = revcontainer.__contains__
859 867
860 868 def __contains__(self, node):
861 869 return self._revcontains(self._torev(node))
862 870
863 871 def cleanupnodes(repo, replacements, operation, moves=None, metadata=None,
864 872 fixphase=False, targetphase=None, backup=True):
865 873 """do common cleanups when old nodes are replaced by new nodes
866 874
867 875 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
868 876 (we might also want to move working directory parent in the future)
869 877
870 878 By default, bookmark moves are calculated automatically from 'replacements',
871 879 but 'moves' can be used to override that. Also, 'moves' may include
872 880 additional bookmark moves that should not have associated obsmarkers.
873 881
874 882 replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
875 883 have replacements. operation is a string, like "rebase".
876 884
877 885 metadata is a dictionary containing metadata to be stored in obsmarker if
878 886 obsolescence is enabled.
879 887 """
880 888 assert fixphase or targetphase is None
881 889 if not replacements and not moves:
882 890 return
883 891
884 892 # translate mapping's other forms
885 893 if not util.safehasattr(replacements, 'items'):
886 894 replacements = {(n,): () for n in replacements}
887 895 else:
888 896 # upgrading non-tuple "source" to tuple ones for BC
889 897 repls = {}
890 898 for key, value in replacements.items():
891 899 if not isinstance(key, tuple):
892 900 key = (key,)
893 901 repls[key] = value
894 902 replacements = repls
895 903
896 904 # Calculate bookmark movements
897 905 if moves is None:
898 906 moves = {}
899 907 # Unfiltered repo is needed since nodes in replacements might be hidden.
900 908 unfi = repo.unfiltered()
901 909 for oldnodes, newnodes in replacements.items():
902 910 for oldnode in oldnodes:
903 911 if oldnode in moves:
904 912 continue
905 913 if len(newnodes) > 1:
906 914 # usually a split, take the one with biggest rev number
907 915 newnode = next(unfi.set('max(%ln)', newnodes)).node()
908 916 elif len(newnodes) == 0:
909 917 # move bookmark backwards
910 918 allreplaced = []
911 919 for rep in replacements:
912 920 allreplaced.extend(rep)
913 921 roots = list(unfi.set('max((::%n) - %ln)', oldnode,
914 922 allreplaced))
915 923 if roots:
916 924 newnode = roots[0].node()
917 925 else:
918 926 newnode = nullid
919 927 else:
920 928 newnode = newnodes[0]
921 929 moves[oldnode] = newnode
922 930
923 931 allnewnodes = [n for ns in replacements.values() for n in ns]
924 932 toretract = {}
925 933 toadvance = {}
926 934 if fixphase:
927 935 precursors = {}
928 936 for oldnodes, newnodes in replacements.items():
929 937 for oldnode in oldnodes:
930 938 for newnode in newnodes:
931 939 precursors.setdefault(newnode, []).append(oldnode)
932 940
933 941 allnewnodes.sort(key=lambda n: unfi[n].rev())
934 942 newphases = {}
935 943 def phase(ctx):
936 944 return newphases.get(ctx.node(), ctx.phase())
937 945 for newnode in allnewnodes:
938 946 ctx = unfi[newnode]
939 947 parentphase = max(phase(p) for p in ctx.parents())
940 948 if targetphase is None:
941 949 oldphase = max(unfi[oldnode].phase()
942 950 for oldnode in precursors[newnode])
943 951 newphase = max(oldphase, parentphase)
944 952 else:
945 953 newphase = max(targetphase, parentphase)
946 954 newphases[newnode] = newphase
947 955 if newphase > ctx.phase():
948 956 toretract.setdefault(newphase, []).append(newnode)
949 957 elif newphase < ctx.phase():
950 958 toadvance.setdefault(newphase, []).append(newnode)
951 959
952 960 with repo.transaction('cleanup') as tr:
953 961 # Move bookmarks
954 962 bmarks = repo._bookmarks
955 963 bmarkchanges = []
956 964 for oldnode, newnode in moves.items():
957 965 oldbmarks = repo.nodebookmarks(oldnode)
958 966 if not oldbmarks:
959 967 continue
960 968 from . import bookmarks # avoid import cycle
961 969 repo.ui.debug('moving bookmarks %r from %s to %s\n' %
962 970 (pycompat.rapply(pycompat.maybebytestr, oldbmarks),
963 971 hex(oldnode), hex(newnode)))
964 972 # Delete divergent bookmarks being parents of related newnodes
965 973 deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
966 974 allnewnodes, newnode, oldnode)
967 975 deletenodes = _containsnode(repo, deleterevs)
968 976 for name in oldbmarks:
969 977 bmarkchanges.append((name, newnode))
970 978 for b in bookmarks.divergent2delete(repo, deletenodes, name):
971 979 bmarkchanges.append((b, None))
972 980
973 981 if bmarkchanges:
974 982 bmarks.applychanges(repo, tr, bmarkchanges)
975 983
976 984 for phase, nodes in toretract.items():
977 985 phases.retractboundary(repo, tr, phase, nodes)
978 986 for phase, nodes in toadvance.items():
979 987 phases.advanceboundary(repo, tr, phase, nodes)
980 988
981 989 # Obsolete or strip nodes
982 990 if obsolete.isenabled(repo, obsolete.createmarkersopt):
983 991 # If a node is already obsoleted, and we want to obsolete it
984 992 # without a successor, skip that obsolete request since it's
985 993 # unnecessary. That's the "if s or not isobs(n)" check below.
986 994 # Also sort the nodes in topological order; that might be useful for
987 995 # some obsstore logic.
988 996 # NOTE: the sorting might belong to createmarkers.
989 997 torev = unfi.changelog.rev
990 998 sortfunc = lambda ns: torev(ns[0][0])
991 999 rels = []
992 1000 for ns, s in sorted(replacements.items(), key=sortfunc):
993 1001 rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
994 1002 rels.append(rel)
995 1003 if rels:
996 1004 obsolete.createmarkers(repo, rels, operation=operation,
997 1005 metadata=metadata)
998 1006 else:
999 1007 from . import repair # avoid import cycle
1000 1008 tostrip = list(n for ns in replacements for n in ns)
1001 1009 if tostrip:
1002 1010 repair.delayedstrip(repo.ui, repo, tostrip, operation,
1003 1011 backup=backup)
1004 1012
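A calling sketch for cleanupnodes() (oldnode and newnode stand in for real
20-byte nodeids where newnode rewrites oldnode; holding the locks is the
caller's responsibility):

    from mercurial import hg, scmutil, ui as uimod

    ui = uimod.ui.load()
    repo = hg.repository(ui, '.')

    with repo.wlock(), repo.lock():
        # obsoletes oldnode (or strips it when obsolescence is disabled)
        # and moves any bookmarks from oldnode to newnode
        scmutil.cleanupnodes(repo, {oldnode: [newnode]}, 'demo-rewrite')
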
1005 1013 def addremove(repo, matcher, prefix, opts=None):
1006 1014 if opts is None:
1007 1015 opts = {}
1008 1016 m = matcher
1009 1017 dry_run = opts.get('dry_run')
1010 1018 try:
1011 1019 similarity = float(opts.get('similarity') or 0)
1012 1020 except ValueError:
1013 1021 raise error.Abort(_('similarity must be a number'))
1014 1022 if similarity < 0 or similarity > 100:
1015 1023 raise error.Abort(_('similarity must be between 0 and 100'))
1016 1024 similarity /= 100.0
1017 1025
1018 1026 ret = 0
1019 1027 join = lambda f: os.path.join(prefix, f)
1020 1028
1021 1029 wctx = repo[None]
1022 1030 for subpath in sorted(wctx.substate):
1023 1031 submatch = matchmod.subdirmatcher(subpath, m)
1024 1032 if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
1025 1033 sub = wctx.sub(subpath)
1026 1034 try:
1027 1035 if sub.addremove(submatch, prefix, opts):
1028 1036 ret = 1
1029 1037 except error.LookupError:
1030 1038 repo.ui.status(_("skipping missing subrepository: %s\n")
1031 1039 % join(subpath))
1032 1040
1033 1041 rejected = []
1034 1042 def badfn(f, msg):
1035 1043 if f in m.files():
1036 1044 m.bad(f, msg)
1037 1045 rejected.append(f)
1038 1046
1039 1047 badmatch = matchmod.badmatch(m, badfn)
1040 1048 added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
1041 1049 badmatch)
1042 1050
1043 1051 unknownset = set(unknown + forgotten)
1044 1052 toprint = unknownset.copy()
1045 1053 toprint.update(deleted)
1046 1054 for abs in sorted(toprint):
1047 1055 if repo.ui.verbose or not m.exact(abs):
1048 1056 if abs in unknownset:
1049 1057 status = _('adding %s\n') % m.uipath(abs)
1050 1058 label = 'ui.addremove.added'
1051 1059 else:
1052 1060 status = _('removing %s\n') % m.uipath(abs)
1053 1061 label = 'ui.addremove.removed'
1054 1062 repo.ui.status(status, label=label)
1055 1063
1056 1064 renames = _findrenames(repo, m, added + unknown, removed + deleted,
1057 1065 similarity)
1058 1066
1059 1067 if not dry_run:
1060 1068 _markchanges(repo, unknown + forgotten, deleted, renames)
1061 1069
1062 1070 for f in rejected:
1063 1071 if f in m.files():
1064 1072 return 1
1065 1073 return ret
1066 1074
1067 1075 def marktouched(repo, files, similarity=0.0):
1068 1076 '''Assert that files have somehow been operated upon. files are relative to
1069 1077 the repo root.'''
1070 1078 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
1071 1079 rejected = []
1072 1080
1073 1081 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
1074 1082
1075 1083 if repo.ui.verbose:
1076 1084 unknownset = set(unknown + forgotten)
1077 1085 toprint = unknownset.copy()
1078 1086 toprint.update(deleted)
1079 1087 for abs in sorted(toprint):
1080 1088 if abs in unknownset:
1081 1089 status = _('adding %s\n') % abs
1082 1090 else:
1083 1091 status = _('removing %s\n') % abs
1084 1092 repo.ui.status(status)
1085 1093
1086 1094 renames = _findrenames(repo, m, added + unknown, removed + deleted,
1087 1095 similarity)
1088 1096
1089 1097 _markchanges(repo, unknown + forgotten, deleted, renames)
1090 1098
1091 1099 for f in rejected:
1092 1100 if f in m.files():
1093 1101 return 1
1094 1102 return 0
1095 1103
1096 1104 def _interestingfiles(repo, matcher):
1097 1105 '''Walk dirstate with matcher, looking for files that addremove would care
1098 1106 about.
1099 1107
1100 1108 This is different from dirstate.status because it doesn't care about
1101 1109 whether files are modified or clean.'''
1102 1110 added, unknown, deleted, removed, forgotten = [], [], [], [], []
1103 1111 audit_path = pathutil.pathauditor(repo.root, cached=True)
1104 1112
1105 1113 ctx = repo[None]
1106 1114 dirstate = repo.dirstate
1107 1115 matcher = repo.narrowmatch(matcher, includeexact=True)
1108 1116 walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
1109 1117 unknown=True, ignored=False, full=False)
1110 1118 for abs, st in walkresults.iteritems():
1111 1119 dstate = dirstate[abs]
1112 1120 if dstate == '?' and audit_path.check(abs):
1113 1121 unknown.append(abs)
1114 1122 elif dstate != 'r' and not st:
1115 1123 deleted.append(abs)
1116 1124 elif dstate == 'r' and st:
1117 1125 forgotten.append(abs)
1118 1126 # for finding renames
1119 1127 elif dstate == 'r' and not st:
1120 1128 removed.append(abs)
1121 1129 elif dstate == 'a':
1122 1130 added.append(abs)
1123 1131
1124 1132 return added, unknown, deleted, removed, forgotten
1125 1133
1126 1134 def _findrenames(repo, matcher, added, removed, similarity):
1127 1135 '''Find renames from removed files to added ones.'''
1128 1136 renames = {}
1129 1137 if similarity > 0:
1130 1138 for old, new, score in similar.findrenames(repo, added, removed,
1131 1139 similarity):
1132 1140 if (repo.ui.verbose or not matcher.exact(old)
1133 1141 or not matcher.exact(new)):
1134 1142 repo.ui.status(_('recording removal of %s as rename to %s '
1135 1143 '(%d%% similar)\n') %
1136 1144 (matcher.rel(old), matcher.rel(new),
1137 1145 score * 100))
1138 1146 renames[new] = old
1139 1147 return renames
1140 1148
1141 1149 def _markchanges(repo, unknown, deleted, renames):
1142 1150 '''Marks the files in unknown as added, the files in deleted as removed,
1143 1151 and the files in renames as copied.'''
1144 1152 wctx = repo[None]
1145 1153 with repo.wlock():
1146 1154 wctx.forget(deleted)
1147 1155 wctx.add(unknown)
1148 1156 for new, old in renames.iteritems():
1149 1157 wctx.copy(old, new)
1150 1158
1151 1159 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1152 1160 """Update the dirstate to reflect the intent of copying src to dst. For
1153 1161 different reasons it might not end with dst being marked as copied from src.
1154 1162 """
1155 1163 origsrc = repo.dirstate.copied(src) or src
1156 1164 if dst == origsrc: # copying back a copy?
1157 1165 if repo.dirstate[dst] not in 'mn' and not dryrun:
1158 1166 repo.dirstate.normallookup(dst)
1159 1167 else:
1160 1168 if repo.dirstate[origsrc] == 'a' and origsrc == src:
1161 1169 if not ui.quiet:
1162 1170 ui.warn(_("%s has not been committed yet, so no copy "
1163 1171 "data will be stored for %s.\n")
1164 1172 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
1165 1173 if repo.dirstate[dst] in '?r' and not dryrun:
1166 1174 wctx.add([dst])
1167 1175 elif not dryrun:
1168 1176 wctx.copy(origsrc, dst)
1169 1177
1170 1178 def writerequires(opener, requirements):
1171 1179 with opener('requires', 'w', atomictemp=True) as fp:
1172 1180 for r in sorted(requirements):
1173 1181 fp.write("%s\n" % r)
1174 1182
1175 1183 class filecachesubentry(object):
1176 1184 def __init__(self, path, stat):
1177 1185 self.path = path
1178 1186 self.cachestat = None
1179 1187 self._cacheable = None
1180 1188
1181 1189 if stat:
1182 1190 self.cachestat = filecachesubentry.stat(self.path)
1183 1191
1184 1192 if self.cachestat:
1185 1193 self._cacheable = self.cachestat.cacheable()
1186 1194 else:
1187 1195 # None means we don't know yet
1188 1196 self._cacheable = None
1189 1197
1190 1198 def refresh(self):
1191 1199 if self.cacheable():
1192 1200 self.cachestat = filecachesubentry.stat(self.path)
1193 1201
1194 1202 def cacheable(self):
1195 1203 if self._cacheable is not None:
1196 1204 return self._cacheable
1197 1205
1198 1206 # we don't know yet, assume it is for now
1199 1207 return True
1200 1208
1201 1209 def changed(self):
1202 1210 # no point in going further if we can't cache it
1203 1211 if not self.cacheable():
1204 1212 return True
1205 1213
1206 1214 newstat = filecachesubentry.stat(self.path)
1207 1215
1208 1216 # we may not know if it's cacheable yet, check again now
1209 1217 if newstat and self._cacheable is None:
1210 1218 self._cacheable = newstat.cacheable()
1211 1219
1212 1220 # check again
1213 1221 if not self._cacheable:
1214 1222 return True
1215 1223
1216 1224 if self.cachestat != newstat:
1217 1225 self.cachestat = newstat
1218 1226 return True
1219 1227 else:
1220 1228 return False
1221 1229
1222 1230 @staticmethod
1223 1231 def stat(path):
1224 1232 try:
1225 1233 return util.cachestat(path)
1226 1234 except OSError as e:
1227 1235 if e.errno != errno.ENOENT:
1228 1236 raise
1229 1237
1230 1238 class filecacheentry(object):
1231 1239 def __init__(self, paths, stat=True):
1232 1240 self._entries = []
1233 1241 for path in paths:
1234 1242 self._entries.append(filecachesubentry(path, stat))
1235 1243
1236 1244 def changed(self):
1237 1245 '''true if any entry has changed'''
1238 1246 for entry in self._entries:
1239 1247 if entry.changed():
1240 1248 return True
1241 1249 return False
1242 1250
1243 1251 def refresh(self):
1244 1252 for entry in self._entries:
1245 1253 entry.refresh()
1246 1254
1247 1255 class filecache(object):
1248 1256 """A property like decorator that tracks files under .hg/ for updates.
1249 1257
1250 1258 On first access, the files defined as arguments are stat()ed and the
1251 1259 results cached. The decorated function is called. The results are stashed
1252 1260 away in a ``_filecache`` dict on the object whose method is decorated.
1253 1261
1254 1262 On subsequent access, the cached result is used as it is set to the
1255 1263 instance dictionary.
1256 1264
1257 1265 On external property set/delete operations, the caller must update the
1258 1266 corresponding _filecache entry appropriately. Use __class__.<attr>.set()
1259 1267 instead of directly setting <attr>.
1260 1268
1261 1269 When using the property API, the cached data is always used if available.
1262 1270 No stat() is performed to check if the file has changed.
1263 1271
1264 1272 Others can muck about with the state of the ``_filecache`` dict. e.g. they
1265 1273 can populate an entry before the property's getter is called. In this case,
1266 1274 entries in ``_filecache`` will be used during property operations,
1267 1275 if available. If the underlying file changes, it is up to external callers
1268 1276 to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
1269 1277 method result as well as possibly calling ``del obj._filecache[attr]`` to
1270 1278 remove the ``filecacheentry``.
1271 1279 """
1272 1280
1273 1281 def __init__(self, *paths):
1274 1282 self.paths = paths
1275 1283
1276 1284 def join(self, obj, fname):
1277 1285 """Used to compute the runtime path of a cached file.
1278 1286
1279 1287 Users should subclass filecache and provide their own version of this
1280 1288 function to call the appropriate join function on 'obj' (an instance
1281 1289 of the class that its member function was decorated).
1282 1290 """
1283 1291 raise NotImplementedError
1284 1292
1285 1293 def __call__(self, func):
1286 1294 self.func = func
1287 1295 self.sname = func.__name__
1288 1296 self.name = pycompat.sysbytes(self.sname)
1289 1297 return self
1290 1298
1291 1299 def __get__(self, obj, type=None):
1292 1300 # if accessed on the class, return the descriptor itself.
1293 1301 if obj is None:
1294 1302 return self
1295 1303
1296 1304 assert self.sname not in obj.__dict__
1297 1305
1298 1306 entry = obj._filecache.get(self.name)
1299 1307
1300 1308 if entry:
1301 1309 if entry.changed():
1302 1310 entry.obj = self.func(obj)
1303 1311 else:
1304 1312 paths = [self.join(obj, path) for path in self.paths]
1305 1313
1306 1314 # We stat -before- creating the object so our cache doesn't lie if
1307 1315 # a writer modified between the time we read and stat
1308 1316 entry = filecacheentry(paths, True)
1309 1317 entry.obj = self.func(obj)
1310 1318
1311 1319 obj._filecache[self.name] = entry
1312 1320
1313 1321 obj.__dict__[self.sname] = entry.obj
1314 1322 return entry.obj
1315 1323
1316 1324 # don't implement __set__(), which would make __dict__ lookup as slow as
1317 1325 # function call.
1318 1326
1319 1327 def set(self, obj, value):
1320 1328 if self.name not in obj._filecache:
1321 1329 # we add an entry for the missing value because X in __dict__
1322 1330 # implies X in _filecache
1323 1331 paths = [self.join(obj, path) for path in self.paths]
1324 1332 ce = filecacheentry(paths, False)
1325 1333 obj._filecache[self.name] = ce
1326 1334 else:
1327 1335 ce = obj._filecache[self.name]
1328 1336
1329 1337 ce.obj = value # update cached copy
1330 1338 obj.__dict__[self.sname] = value # update copy returned by obj.x
1331 1339
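A sketch of subclassing filecache: the only required override is join()
(localrepo defines its own variants; 'cfg' and its 'hgrc' file are
illustrative):

    from mercurial import scmutil

    class vfsfilecache(scmutil.filecache):
        def join(self, obj, fname):
            return obj.vfs.join(fname)

    class cfg(object):
        def __init__(self, vfs):
            self.vfs = vfs
            self._filecache = {}     # the descriptor stores its entries here

        @vfsfilecache('hgrc')
        def rc(self):
            # re-evaluated only when the stat() of 'hgrc' changes
            return self.vfs.tryread('hgrc')
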
1332 1340 def extdatasource(repo, source):
1333 1341 """Gather a map of rev -> value dict from the specified source
1334 1342
1335 1343 A source spec is treated as a URL, with a special case shell: type
1336 1344 for parsing the output from a shell command.
1337 1345
1338 1346 The data is parsed as a series of newline-separated records where
1339 1347 each record is a revision specifier optionally followed by a space
1340 1348 and a freeform string value. If the revision is known locally, it
1341 1349 is converted to a rev, otherwise the record is skipped.
1342 1350
1343 1351 Note that both key and value are treated as UTF-8 and converted to
1344 1352 the local encoding. This allows uniformity between local and
1345 1353 remote data sources.
1346 1354 """
1347 1355
1348 1356 spec = repo.ui.config("extdata", source)
1349 1357 if not spec:
1350 1358 raise error.Abort(_("unknown extdata source '%s'") % source)
1351 1359
1352 1360 data = {}
1353 1361 src = proc = None
1354 1362 try:
1355 1363 if spec.startswith("shell:"):
1356 1364 # external commands should be run relative to the repo root
1357 1365 cmd = spec[6:]
1358 1366 proc = subprocess.Popen(procutil.tonativestr(cmd),
1359 1367 shell=True, bufsize=-1,
1360 1368 close_fds=procutil.closefds,
1361 1369 stdout=subprocess.PIPE,
1362 1370 cwd=procutil.tonativestr(repo.root))
1363 1371 src = proc.stdout
1364 1372 else:
1365 1373 # treat as a URL or file
1366 1374 src = url.open(repo.ui, spec)
1367 1375 for l in src:
1368 1376 if " " in l:
1369 1377 k, v = l.strip().split(" ", 1)
1370 1378 else:
1371 1379 k, v = l.strip(), ""
1372 1380
1373 1381 k = encoding.tolocal(k)
1374 1382 try:
1375 1383 data[revsingle(repo, k).rev()] = encoding.tolocal(v)
1376 1384 except (error.LookupError, error.RepoLookupError):
1377 1385 pass # we ignore data for nodes that don't exist locally
1378 1386 finally:
1379 1387 if proc:
1380 1388 proc.communicate()
1381 1389 if src:
1382 1390 src.close()
1383 1391 if proc and proc.returncode != 0:
1384 1392 raise error.Abort(_("extdata command '%s' failed: %s")
1385 1393 % (cmd, procutil.explainexit(proc.returncode)))
1386 1394
1387 1395 return data
1388 1396
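An extdata setup sketch (the section name 'bugrefs' and its data file are made
up; each input line is "<revision> <value>" per the docstring above):

    # in .hg/hgrc:
    #   [extdata]
    #   bugrefs = shell:cat .hg/bugrefs.txt

    from mercurial import hg, scmutil, ui as uimod

    ui = uimod.ui.load()
    repo = hg.repository(ui, '.')
    data = scmutil.extdatasource(repo, 'bugrefs')   # {rev: value}
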
1389 1397 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1390 1398 if lock is None:
1391 1399 raise error.LockInheritanceContractViolation(
1392 1400 'lock can only be inherited while held')
1393 1401 if environ is None:
1394 1402 environ = {}
1395 1403 with lock.inherit() as locker:
1396 1404 environ[envvar] = locker
1397 1405 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1398 1406
1399 1407 def wlocksub(repo, cmd, *args, **kwargs):
1400 1408 """run cmd as a subprocess that allows inheriting repo's wlock
1401 1409
1402 1410 This can only be called while the wlock is held. This takes all the
1403 1411 arguments that ui.system does, and returns the exit code of the
1404 1412 subprocess."""
1405 1413 return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
1406 1414 **kwargs)
1407 1415
1408 1416 class progress(object):
1409 1417 def __init__(self, ui, topic, unit="", total=None):
1410 1418 self.ui = ui
1411 1419 self.pos = 0
1412 1420 self.topic = topic
1413 1421 self.unit = unit
1414 1422 self.total = total
1415 1423
1416 1424 def __enter__(self):
1417 1425 return self
1418 1426
1419 1427 def __exit__(self, exc_type, exc_value, exc_tb):
1420 1428 self.complete()
1421 1429
1422 1430 def update(self, pos, item="", total=None):
1423 1431 assert pos is not None
1424 1432 if total:
1425 1433 self.total = total
1426 1434 self.pos = pos
1427 1435 self._print(item)
1428 1436
1429 1437 def increment(self, step=1, item="", total=None):
1430 1438 self.update(self.pos + step, item, total)
1431 1439
1432 1440 def complete(self):
1433 1441 self.ui.progress(self.topic, None)
1434 1442
1435 1443 def _print(self, item):
1436 1444 self.ui.progress(self.topic, self.pos, item, self.unit,
1437 1445 self.total)
1438 1446
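A sketch of the progress helper as a context manager (topic and items are
arbitrary; complete() runs automatically on exit):

    from mercurial import scmutil, ui as uimod

    ui = uimod.ui.load()
    items = ['a', 'b', 'c']
    with scmutil.progress(ui, 'demo', unit='items', total=len(items)) as prog:
        for item in items:
            prog.increment(item=item)
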
1439 1447 def gdinitconfig(ui):
1440 1448 """helper function to know if a repo should be created as general delta
1441 1449 """
1442 1450 # experimental config: format.generaldelta
1443 1451 return (ui.configbool('format', 'generaldelta')
1444 1452 or ui.configbool('format', 'usegeneraldelta')
1445 1453 or ui.configbool('format', 'sparse-revlog'))
1446 1454
1447 1455 def gddeltaconfig(ui):
1448 1456 """helper function to know if incoming delta should be optimised
1449 1457 """
1450 1458 # experimental config: format.generaldelta
1451 1459 return ui.configbool('format', 'generaldelta')
1452 1460
1453 1461 class simplekeyvaluefile(object):
1454 1462 """A simple file with key=value lines
1455 1463
1456 1464 Keys must be alphanumeric and start with a letter; values must not
1457 1465 contain '\n' characters"""
1458 1466 firstlinekey = '__firstline'
1459 1467
1460 1468 def __init__(self, vfs, path, keys=None):
1461 1469 self.vfs = vfs
1462 1470 self.path = path
1463 1471
1464 1472 def read(self, firstlinenonkeyval=False):
1465 1473 """Read the contents of a simple key-value file
1466 1474
1467 1475 'firstlinenonkeyval' indicates whether the first line of file should
1468 1476 be treated as a key-value pair or returned fully under the
1469 1477 __firstline key."""
1470 1478 lines = self.vfs.readlines(self.path)
1471 1479 d = {}
1472 1480 if firstlinenonkeyval:
1473 1481 if not lines:
1474 1482 e = _("empty simplekeyvalue file")
1475 1483 raise error.CorruptedState(e)
1476 1484 # we don't want to include '\n' in the __firstline
1477 1485 d[self.firstlinekey] = lines[0][:-1]
1478 1486 del lines[0]
1479 1487
1480 1488 try:
1481 1489 # the 'if line.strip()' part prevents us from failing on empty
1482 1490 # lines which only contain '\n' and therefore are not skipped
1483 1491 # by 'if line'
1484 1492 updatedict = dict(line[:-1].split('=', 1) for line in lines
1485 1493 if line.strip())
1486 1494 if self.firstlinekey in updatedict:
1487 1495 e = _("%r can't be used as a key")
1488 1496 raise error.CorruptedState(e % self.firstlinekey)
1489 1497 d.update(updatedict)
1490 1498 except ValueError as e:
1491 1499 raise error.CorruptedState(str(e))
1492 1500 return d
1493 1501
1494 1502 def write(self, data, firstline=None):
1495 1503 """Write key=>value mapping to a file
1496 1504 data is a dict. Keys must be alphanumeric and start with a letter.
1497 1505 Values must not contain newline characters.
1498 1506
1499 1507 If 'firstline' is not None, it is written to file before
1500 1508 everything else, as it is, not in a key=value form"""
1501 1509 lines = []
1502 1510 if firstline is not None:
1503 1511 lines.append('%s\n' % firstline)
1504 1512
1505 1513 for k, v in data.items():
1506 1514 if k == self.firstlinekey:
1507 1515 e = "key name '%s' is reserved" % self.firstlinekey
1508 1516 raise error.ProgrammingError(e)
1509 1517 if not k[0:1].isalpha():
1510 1518 e = "keys must start with a letter in a key-value file"
1511 1519 raise error.ProgrammingError(e)
1512 1520 if not k.isalnum():
1513 1521 e = "invalid key name in a simple key-value file"
1514 1522 raise error.ProgrammingError(e)
1515 1523 if '\n' in v:
1516 1524 e = "invalid value in a simple key-value file"
1517 1525 raise error.ProgrammingError(e)
1518 1526 lines.append("%s=%s\n" % (k, v))
1519 1527 with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
1520 1528 fp.write(''.join(lines))
1521 1529
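A write/read round trip for simplekeyvaluefile (the temporary directory and
key names are arbitrary):

    import tempfile
    from mercurial import scmutil, vfs as vfsmod

    kvvfs = vfsmod.vfs(tempfile.mkdtemp())
    kvf = scmutil.simplekeyvaluefile(kvvfs, 'state')
    kvf.write({'version': '1', 'step': 'apply'}, firstline='demo-v1')
    assert kvf.read(firstlinenonkeyval=True) == {
        '__firstline': 'demo-v1', 'version': '1', 'step': 'apply'}
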
1522 1530 _reportobsoletedsource = [
1523 1531 'debugobsolete',
1524 1532 'pull',
1525 1533 'push',
1526 1534 'serve',
1527 1535 'unbundle',
1528 1536 ]
1529 1537
1530 1538 _reportnewcssource = [
1531 1539 'pull',
1532 1540 'unbundle',
1533 1541 ]
1534 1542
1535 1543 def prefetchfiles(repo, revs, match):
1536 1544 """Invokes the registered file prefetch functions, allowing extensions to
1537 1545 ensure the corresponding files are available locally, before the command
1538 1546 uses them."""
1539 1547 if match:
1540 1548 # The command itself will complain about files that don't exist, so
1541 1549 # don't duplicate the message.
1542 1550 match = matchmod.badmatch(match, lambda fn, msg: None)
1543 1551 else:
1544 1552 match = matchall(repo)
1545 1553
1546 1554 fileprefetchhooks(repo, revs, match)
1547 1555
1548 1556 # a list of (repo, revs, match) prefetch functions
1549 1557 fileprefetchhooks = util.hooks()
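
# Example (hedged sketch): an extension registers a prefetch function so the
# matched files are available before a command reads them; 'prefetch' is a
# hypothetical callable and util.hooks exposes add() for registration:
#
#     def prefetch(repo, revs, match):
#         pass  # fetch the files selected by 'match' for the given revs
#
#     fileprefetchhooks.add('myextension', prefetch)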

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True

def registersummarycallback(repo, otr, txnname=''):
    """register a callback to issue a summary after the transaction is closed
    """
    def txmatch(sources):
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used, leading to trouble. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())
        def wrapped(tr):
            repo = reporef()
            if filtername:
                repo = repo.filtered(filtername)
            func(repo, tr)
        newcat = '%02i-txnreport' % len(categories)
        otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    if txmatch(_reportobsoletedsource):
        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            if obsoleted:
                repo.ui.status(_('obsoleted %i changesets\n')
                               % len(obsoleted))

    if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
        repo.ui.configbool('experimental', 'evolution.report-instabilities')):
        instabilitytypes = [
            ('orphan', 'orphan'),
            ('phase-divergent', 'phasedivergent'),
            ('content-divergent', 'contentdivergent'),
        ]

        def getinstabilitycounts(repo):
            filtered = repo.changelog.filteredrevs
            counts = {}
            for instability, revset in instabilitytypes:
                counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
                                          filtered)
            return counts

        oldinstabilitycounts = getinstabilitycounts(repo)
        @reportsummary
        def reportnewinstabilities(repo, tr):
            newinstabilitycounts = getinstabilitycounts(repo)
            for instability, revset in instabilitytypes:
                delta = (newinstabilitycounts[instability] -
                         oldinstabilitycounts[instability])
                msg = getinstabilitymessage(delta, instability)
                if msg:
                    repo.ui.warn(msg)

    if txmatch(_reportnewcssource):
        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get('origrepolen', len(repo))
            unfi = repo.unfiltered()
            if origrepolen >= len(unfi):
                return

            # Compute the bounds of new visible revisions' range.
            revs = smartset.spanset(repo, start=origrepolen)
            if revs:
                minrev, maxrev = repo[revs.min()], repo[revs.max()]

                if minrev == maxrev:
                    revrange = minrev
                else:
                    revrange = '%s:%s' % (minrev, maxrev)
                draft = len(repo.revs('%ld and draft()', revs))
                secret = len(repo.revs('%ld and secret()', revs))
                if not (draft or secret):
                    msg = _('new changesets %s\n') % revrange
                elif draft and secret:
                    msg = _('new changesets %s (%d drafts, %d secrets)\n')
                    msg %= (revrange, draft, secret)
                elif draft:
                    msg = _('new changesets %s (%d drafts)\n')
                    msg %= (revrange, draft)
                elif secret:
                    msg = _('new changesets %s (%d secrets)\n')
                    msg %= (revrange, secret)
                else:
                    errormsg = 'entered unreachable condition'
                    raise error.ProgrammingError(errormsg)
                repo.ui.status(msg)

            # search new changesets directly pulled as obsolete
            duplicates = tr.changes.get('revduplicates', ())
            obsadded = unfi.revs('(%d: + %ld) and obsolete()',
                                 origrepolen, duplicates)
            cl = repo.changelog
            extinctadded = [r for r in obsadded if r not in cl]
            if extinctadded:
                # They are not just obsolete, but obsolete and invisible:
                # we call them "extinct" internally but the terms have not
                # been exposed to users.
                msg = '(%d other changesets obsolete on arrival)\n'
                repo.ui.status(msg % len(extinctadded))

    @reportsummary
    def reportphasechanges(repo, tr):
        """Report statistics of phase changes for changesets pre-existing
        pull/unbundle.
        """
        origrepolen = tr.changes.get('origrepolen', len(repo))
        phasetracking = tr.changes.get('phases', {})
        if not phasetracking:
            return
        published = [
            rev for rev, (old, new) in phasetracking.iteritems()
            if new == phases.public and rev < origrepolen
        ]
        if not published:
            return
        repo.ui.status(_('%d local changesets published\n')
                       % len(published))
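
# Example (hedged sketch): wiring the summary callbacks onto a transaction.
# The repository normally does this itself when a transaction is opened; a
# direct call could look like the following, with 'tr' an open transaction:
#
#     tr = repo.transaction('pull')
#     registersummarycallback(repo, tr, txnname='pull')
#     # ... apply incoming changes ...
#     tr.close()  # post-close callbacks fire here and print the summary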

def getinstabilitymessage(delta, instability):
    """function to return the message to show warning about new instabilities

    exists as a separate function so that extensions can wrap it to show more
    information, like how to fix instabilities"""
    if delta > 0:
        return _('%i new %s changesets\n') % (delta, instability)
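
# Example (hedged sketch): an extension could wrap this function to append
# extra guidance to the warning; extensions.wrapfunction is the standard
# wrapping helper, and the appended hint text is purely illustrative:
#
#     from mercurial import extensions, scmutil
#
#     def _withhint(orig, delta, instability):
#         msg = orig(delta, instability)
#         if msg:
#             msg += "(hint on how to fix %s changesets here)\n" % instability
#         return msg
#
#     extensions.wrapfunction(scmutil, 'getinstabilitymessage', _withhint)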

def nodesummaries(repo, nodes, maxnumnodes=4):
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return ' '.join(short(h) for h in nodes)
    first = ' '.join(short(h) for h in nodes[:maxnumnodes])
    return _("%s and %d others") % (first, len(nodes) - maxnumnodes)
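
# For example, with six nodes and the default maxnumnodes=4, the non-verbose
# form is the 12-hex-digit short hashes of the first four nodes followed by
# "and 2 others"; with --verbose all six short hashes are printed.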

def enforcesinglehead(repo, tr, desc):
    """check that no named branch has multiple heads"""
    if desc in ('strip', 'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered('visible')
    # possible improvement: we could restrict the check to affected branches
    for name, heads in visible.branchmap().iteritems():
        if len(heads) > 1:
            msg = _('rejecting multiple heads on branch "%s"')
            msg %= name
            hint = _('%d heads: %s')
            hint %= (len(heads), nodesummaries(repo, heads))
            raise error.Abort(msg, hint=hint)
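
# Example (hedged sketch): hooking the check into a transaction, assuming
# 'tr' exposes addvalidator (validators run before the transaction closes);
# the 'single-head' id is an arbitrary label:
#
#     def validate(tr):
#         enforcesinglehead(repo, tr, desc='push')
#
#     tr.addvalidator('single-head', validate)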

def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.
    """
    return sink

def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    if not repo.filtername or not repo.ui.configbool('experimental',
                                                     'directaccess'):
        return repo

    if repo.filtername not in ('visible', 'visible-hidden'):
        return repo

    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError: # will be reported by scmutil.revrange()
            continue

        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)

    if not revs:
        return repo

    if hiddentype == 'warn':
        unfi = repo.unfiltered()
        revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
        repo.ui.warn(_("warning: accessing hidden changesets for write "
                       "operation: %s\n") % revstr)

    # we have to use a new filtername to separate the branch/tags caches until
    # we can disable these caches when revisions are dynamically pinned.
    return repo.filtered('visible-hidden', revs)
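
# Example (hedged sketch): a write command handling user-supplied revision
# specs could unhide hash-like values before resolving them; the spec list
# below is a placeholder:
#
#     specs = ['<hex-prefix-of-a-hidden-changeset>']
#     repo = unhidehashlikerevs(repo, specs, 'warn')
#     revs = revrange(repo, specs)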

def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and return the set of revision numbers of
    hidden changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
    for s in symbols:
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    if n not in cl:
                        revs.add(n)
                    continue
        except ValueError:
            pass

        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs

def bookmarkrevs(repo, mark):
    """
    Select revisions reachable by a given bookmark
    """
    return repo.revs("ancestors(bookmark(%s)) - "
                     "ancestors(head() and not bookmark(%s)) - "
                     "ancestors(bookmark() and not bookmark(%s))",
                     mark, mark, mark)
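
# Example (hedged sketch): the revset above keeps changesets reachable from
# the bookmark while excluding ancestors shared with other heads or other
# bookmarks; 'feature' is a placeholder bookmark name:
#
#     revs = bookmarkrevs(repo, 'feature')
#     # -> revision numbers forming the 'feature' bookmark's own line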