cleanupnodes: trust caller when "moves" is not None...
Martin von Zweigbergk
r40890:b7823bd5 default
@@ -1,1810 +1,1811
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import hashlib
13 13 import os
14 14 import re
15 15 import socket
16 16 import subprocess
17 17 import weakref
18 18
19 19 from .i18n import _
20 20 from .node import (
21 21 bin,
22 22 hex,
23 23 nullid,
24 24 nullrev,
25 25 short,
26 26 wdirid,
27 27 wdirrev,
28 28 )
29 29
30 30 from . import (
31 31 encoding,
32 32 error,
33 33 match as matchmod,
34 34 obsolete,
35 35 obsutil,
36 36 pathutil,
37 37 phases,
38 38 policy,
39 39 pycompat,
40 40 revsetlang,
41 41 similar,
42 42 smartset,
43 43 url,
44 44 util,
45 45 vfs,
46 46 )
47 47
48 48 from .utils import (
49 49 procutil,
50 50 stringutil,
51 51 )
52 52
53 53 if pycompat.iswindows:
54 54 from . import scmwindows as scmplatform
55 55 else:
56 56 from . import scmposix as scmplatform
57 57
58 58 parsers = policy.importmod(r'parsers')
59 59
60 60 termsize = scmplatform.termsize
61 61
62 62 class status(tuple):
63 63 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
64 64 and 'ignored' properties are only relevant to the working copy.
65 65 '''
66 66
67 67 __slots__ = ()
68 68
69 69 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
70 70 clean):
71 71 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
72 72 ignored, clean))
73 73
74 74 @property
75 75 def modified(self):
76 76 '''files that have been modified'''
77 77 return self[0]
78 78
79 79 @property
80 80 def added(self):
81 81 '''files that have been added'''
82 82 return self[1]
83 83
84 84 @property
85 85 def removed(self):
86 86 '''files that have been removed'''
87 87 return self[2]
88 88
89 89 @property
90 90 def deleted(self):
91 91 '''files that are in the dirstate, but have been deleted from the
92 92 working copy (aka "missing")
93 93 '''
94 94 return self[3]
95 95
96 96 @property
97 97 def unknown(self):
98 98 '''files not in the dirstate that are not ignored'''
99 99 return self[4]
100 100
101 101 @property
102 102 def ignored(self):
103 103 '''files not in the dirstate that are ignored (by _dirignore())'''
104 104 return self[5]
105 105
106 106 @property
107 107 def clean(self):
108 108 '''files that have not been modified'''
109 109 return self[6]
110 110
111 111 def __repr__(self, *args, **kwargs):
112 112 return ((r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
113 113 r'unknown=%s, ignored=%s, clean=%s>') %
114 114 tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self))
115 115
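As a quick orientation, a minimal sketch of constructing and consuming this tuple; the file names are hypothetical:

    st = status(['a.txt'], ['b.txt'], [], [], [], [], [])
    assert st.modified == ['a.txt']   # property access...
    modified, added, removed, deleted, unknown, ignored, clean = st  # ...or tuple unpacking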
116 116 def itersubrepos(ctx1, ctx2):
117 117 """find subrepos in ctx1 or ctx2"""
118 118 # Create a (subpath, ctx) mapping where we prefer subpaths from
119 119 # ctx1. The subpaths from ctx2 are important when the .hgsub file
120 120 # has been modified (in ctx2) but not yet committed (in ctx1).
121 121 subpaths = dict.fromkeys(ctx2.substate, ctx2)
122 122 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
123 123
124 124 missing = set()
125 125
126 126 for subpath in ctx2.substate:
127 127 if subpath not in ctx1.substate:
128 128 del subpaths[subpath]
129 129 missing.add(subpath)
130 130
131 131 for subpath, ctx in sorted(subpaths.iteritems()):
132 132 yield subpath, ctx.sub(subpath)
133 133
134 134 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
135 135 # status and diff will have an accurate result when it does
136 136 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
137 137 # against itself.
138 138 for subpath in missing:
139 139 yield subpath, ctx2.nullsub(subpath, ctx1)
140 140
141 141 def nochangesfound(ui, repo, excluded=None):
142 142 '''Report no changes for push/pull, excluded is None or a list of
143 143 nodes excluded from the push/pull.
144 144 '''
145 145 secretlist = []
146 146 if excluded:
147 147 for n in excluded:
148 148 ctx = repo[n]
149 149 if ctx.phase() >= phases.secret and not ctx.extinct():
150 150 secretlist.append(n)
151 151
152 152 if secretlist:
153 153 ui.status(_("no changes found (ignored %d secret changesets)\n")
154 154 % len(secretlist))
155 155 else:
156 156 ui.status(_("no changes found\n"))
157 157
158 158 def callcatch(ui, func):
159 159 """call func() with global exception handling
160 160
161 161 return func() if no exception happens. otherwise do some error handling
162 162 and return an exit code accordingly. does not handle all exceptions.
163 163 """
164 164 try:
165 165 try:
166 166 return func()
167 167 except: # re-raises
168 168 ui.traceback()
169 169 raise
170 170 # Global exception handling, alphabetically
171 171 # Mercurial-specific first, followed by built-in and library exceptions
172 172 except error.LockHeld as inst:
173 173 if inst.errno == errno.ETIMEDOUT:
174 174 reason = _('timed out waiting for lock held by %r') % (
175 175 pycompat.bytestr(inst.locker))
176 176 else:
177 177 reason = _('lock held by %r') % inst.locker
178 178 ui.error(_("abort: %s: %s\n") % (
179 179 inst.desc or stringutil.forcebytestr(inst.filename), reason))
180 180 if not inst.locker:
181 181 ui.error(_("(lock might be very busy)\n"))
182 182 except error.LockUnavailable as inst:
183 183 ui.error(_("abort: could not lock %s: %s\n") %
184 184 (inst.desc or stringutil.forcebytestr(inst.filename),
185 185 encoding.strtolocal(inst.strerror)))
186 186 except error.OutOfBandError as inst:
187 187 if inst.args:
188 188 msg = _("abort: remote error:\n")
189 189 else:
190 190 msg = _("abort: remote error\n")
191 191 ui.error(msg)
192 192 if inst.args:
193 193 ui.error(''.join(inst.args))
194 194 if inst.hint:
195 195 ui.error('(%s)\n' % inst.hint)
196 196 except error.RepoError as inst:
197 197 ui.error(_("abort: %s!\n") % inst)
198 198 if inst.hint:
199 199 ui.error(_("(%s)\n") % inst.hint)
200 200 except error.ResponseError as inst:
201 201 ui.error(_("abort: %s") % inst.args[0])
202 202 msg = inst.args[1]
203 203 if isinstance(msg, type(u'')):
204 204 msg = pycompat.sysbytes(msg)
205 205 if not isinstance(msg, bytes):
206 206 ui.error(" %r\n" % (msg,))
207 207 elif not msg:
208 208 ui.error(_(" empty string\n"))
209 209 else:
210 210 ui.error("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
211 211 except error.CensoredNodeError as inst:
212 212 ui.error(_("abort: file censored %s!\n") % inst)
213 213 except error.StorageError as inst:
214 214 ui.error(_("abort: %s!\n") % inst)
215 215 if inst.hint:
216 216 ui.error(_("(%s)\n") % inst.hint)
217 217 except error.InterventionRequired as inst:
218 218 ui.error("%s\n" % inst)
219 219 if inst.hint:
220 220 ui.error(_("(%s)\n") % inst.hint)
221 221 return 1
222 222 except error.WdirUnsupported:
223 223 ui.error(_("abort: working directory revision cannot be specified\n"))
224 224 except error.Abort as inst:
225 225 ui.error(_("abort: %s\n") % inst)
226 226 if inst.hint:
227 227 ui.error(_("(%s)\n") % inst.hint)
228 228 except ImportError as inst:
229 229 ui.error(_("abort: %s!\n") % stringutil.forcebytestr(inst))
230 230 m = stringutil.forcebytestr(inst).split()[-1]
231 231 if m in "mpatch bdiff".split():
232 232 ui.error(_("(did you forget to compile extensions?)\n"))
233 233 elif m in "zlib".split():
234 234 ui.error(_("(is your Python install correct?)\n"))
235 235 except IOError as inst:
236 236 if util.safehasattr(inst, "code"):
237 237 ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst))
238 238 elif util.safehasattr(inst, "reason"):
239 239 try: # usually it is in the form (errno, strerror)
240 240 reason = inst.reason.args[1]
241 241 except (AttributeError, IndexError):
242 242 # it might be anything, for example a string
243 243 reason = inst.reason
244 244 if isinstance(reason, pycompat.unicode):
245 245 # SSLError of Python 2.7.9 contains a unicode
246 246 reason = encoding.unitolocal(reason)
247 247 ui.error(_("abort: error: %s\n") % reason)
248 248 elif (util.safehasattr(inst, "args")
249 249 and inst.args and inst.args[0] == errno.EPIPE):
250 250 pass
251 251 elif getattr(inst, "strerror", None):
252 252 if getattr(inst, "filename", None):
253 253 ui.error(_("abort: %s: %s\n") % (
254 254 encoding.strtolocal(inst.strerror),
255 255 stringutil.forcebytestr(inst.filename)))
256 256 else:
257 257 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
258 258 else:
259 259 raise
260 260 except OSError as inst:
261 261 if getattr(inst, "filename", None) is not None:
262 262 ui.error(_("abort: %s: '%s'\n") % (
263 263 encoding.strtolocal(inst.strerror),
264 264 stringutil.forcebytestr(inst.filename)))
265 265 else:
266 266 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
267 267 except MemoryError:
268 268 ui.error(_("abort: out of memory\n"))
269 269 except SystemExit as inst:
270 270 # Commands shouldn't sys.exit directly, but give a return code.
271 271 # Just in case, catch this and pass the exit code to the caller.
272 272 return inst.code
273 273 except socket.error as inst:
274 274 ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))
275 275
276 276 return -1
277 277
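A hedged sketch of the calling shape, assuming a configured ui; dispatch code wraps its main function like this to turn exceptions into exit codes:

    def _main():
        return runcommand()   # hypothetical; may raise any error handled above

    exitcode = callcatch(ui, _main)   # func()'s result, or 1 / inst.code / -1 on errors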
278 278 def checknewlabel(repo, lbl, kind):
279 279 # Do not use the "kind" parameter in ui output.
280 280 # It makes strings difficult to translate.
281 281 if lbl in ['tip', '.', 'null']:
282 282 raise error.Abort(_("the name '%s' is reserved") % lbl)
283 283 for c in (':', '\0', '\n', '\r'):
284 284 if c in lbl:
285 285 raise error.Abort(
286 286 _("%r cannot be used in a name") % pycompat.bytestr(c))
287 287 try:
288 288 int(lbl)
289 289 raise error.Abort(_("cannot use an integer as a name"))
290 290 except ValueError:
291 291 pass
292 292 if lbl.strip() != lbl:
293 293 raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
294 294
295 295 def checkfilename(f):
296 296 '''Check that the filename f is an acceptable filename for a tracked file'''
297 297 if '\r' in f or '\n' in f:
298 298 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
299 299 % pycompat.bytestr(f))
300 300
301 301 def checkportable(ui, f):
302 302 '''Check if filename f is portable and warn or abort depending on config'''
303 303 checkfilename(f)
304 304 abort, warn = checkportabilityalert(ui)
305 305 if abort or warn:
306 306 msg = util.checkwinfilename(f)
307 307 if msg:
308 308 msg = "%s: %s" % (msg, procutil.shellquote(f))
309 309 if abort:
310 310 raise error.Abort(msg)
311 311 ui.warn(_("warning: %s\n") % msg)
312 312
313 313 def checkportabilityalert(ui):
314 314 '''check if the user's config requests nothing, a warning, or abort for
315 315 non-portable filenames'''
316 316 val = ui.config('ui', 'portablefilenames')
317 317 lval = val.lower()
318 318 bval = stringutil.parsebool(val)
319 319 abort = pycompat.iswindows or lval == 'abort'
320 320 warn = bval or lval == 'warn'
321 321 if bval is None and not (warn or abort or lval == 'ignore'):
322 322 raise error.ConfigError(
323 323 _("ui.portablefilenames value is invalid ('%s')") % val)
324 324 return abort, warn
325 325
326 326 class casecollisionauditor(object):
327 327 def __init__(self, ui, abort, dirstate):
328 328 self._ui = ui
329 329 self._abort = abort
330 330 allfiles = '\0'.join(dirstate._map)
331 331 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
332 332 self._dirstate = dirstate
333 333 # The purpose of _newfiles is so that we don't complain about
334 334 # case collisions if someone were to call this object with the
335 335 # same filename twice.
336 336 self._newfiles = set()
337 337
338 338 def __call__(self, f):
339 339 if f in self._newfiles:
340 340 return
341 341 fl = encoding.lower(f)
342 342 if fl in self._loweredfiles and f not in self._dirstate:
343 343 msg = _('possible case-folding collision for %s') % f
344 344 if self._abort:
345 345 raise error.Abort(msg)
346 346 self._ui.warn(_("warning: %s\n") % msg)
347 347 self._loweredfiles.add(fl)
348 348 self._newfiles.add(f)
349 349
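A usage sketch, assuming a loaded repo; files about to be added are fed through the auditor one by one:

    auditor = casecollisionauditor(repo.ui, False, repo.dirstate)  # warn, don't abort
    for f in ['README', 'readme']:   # hypothetical names differing only in case
        auditor(f)                   # second call warns about a possible collision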
350 350 def filteredhash(repo, maxrev):
351 351 """build hash of filtered revisions in the current repoview.
352 352
353 353 Multiple caches perform up-to-date validation by checking that the
354 354 tiprev and tipnode stored in the cache file match the current repository.
355 355 However, this is not sufficient for validating repoviews because the set
356 356 of revisions in the view may change without the repository tiprev and
357 357 tipnode changing.
358 358
359 359 This function hashes all the revs filtered from the view and returns
360 360 that SHA-1 digest.
361 361 """
362 362 cl = repo.changelog
363 363 if not cl.filteredrevs:
364 364 return None
365 365 key = None
366 366 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
367 367 if revs:
368 368 s = hashlib.sha1()
369 369 for rev in revs:
370 370 s.update('%d;' % rev)
371 371 key = s.digest()
372 372 return key
373 373
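In other words, the key is a plain SHA-1 over the sorted filtered revisions serialized as '<rev>;'; a standalone sketch of the scheme:

    import hashlib
    filtered = [2, 5, 7]              # hypothetical filtered revs <= maxrev
    s = hashlib.sha1()
    for rev in sorted(filtered):
        s.update('%d;' % rev)
    key = s.digest()                  # compared against the cached digest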
374 374 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
375 375 '''yield every hg repository under path, always recursively.
376 376 The recurse flag will only control recursion into repo working dirs'''
377 377 def errhandler(err):
378 378 if err.filename == path:
379 379 raise err
380 380 samestat = getattr(os.path, 'samestat', None)
381 381 if followsym and samestat is not None:
382 382 def adddir(dirlst, dirname):
383 383 dirstat = os.stat(dirname)
384 384 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
385 385 if not match:
386 386 dirlst.append(dirstat)
387 387 return not match
388 388 else:
389 389 followsym = False
390 390
391 391 if (seen_dirs is None) and followsym:
392 392 seen_dirs = []
393 393 adddir(seen_dirs, path)
394 394 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
395 395 dirs.sort()
396 396 if '.hg' in dirs:
397 397 yield root # found a repository
398 398 qroot = os.path.join(root, '.hg', 'patches')
399 399 if os.path.isdir(os.path.join(qroot, '.hg')):
400 400 yield qroot # we have a patch queue repo here
401 401 if recurse:
402 402 # avoid recursing inside the .hg directory
403 403 dirs.remove('.hg')
404 404 else:
405 405 dirs[:] = [] # don't descend further
406 406 elif followsym:
407 407 newdirs = []
408 408 for d in dirs:
409 409 fname = os.path.join(root, d)
410 410 if adddir(seen_dirs, fname):
411 411 if os.path.islink(fname):
412 412 for hgname in walkrepos(fname, True, seen_dirs):
413 413 yield hgname
414 414 else:
415 415 newdirs.append(d)
416 416 dirs[:] = newdirs
417 417
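Usage is a plain generator loop; a sketch over a hypothetical directory tree:

    for root in walkrepos('/srv/repos', followsym=True):
        print(root)   # every directory containing '.hg', plus any patch queue repos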
418 418 def binnode(ctx):
419 419 """Return binary node id for a given basectx"""
420 420 node = ctx.node()
421 421 if node is None:
422 422 return wdirid
423 423 return node
424 424
425 425 def intrev(ctx):
426 426 """Return integer for a given basectx that can be used in comparison or
427 427 arithmetic operation"""
428 428 rev = ctx.rev()
429 429 if rev is None:
430 430 return wdirrev
431 431 return rev
432 432
433 433 def formatchangeid(ctx):
434 434 """Format changectx as '{rev}:{node|formatnode}', which is the default
435 435 template provided by logcmdutil.changesettemplater"""
436 436 repo = ctx.repo()
437 437 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
438 438
439 439 def formatrevnode(ui, rev, node):
440 440 """Format given revision and node depending on the current verbosity"""
441 441 if ui.debugflag:
442 442 hexfunc = hex
443 443 else:
444 444 hexfunc = short
445 445 return '%d:%s' % (rev, hexfunc(node))
446 446
447 447 def resolvehexnodeidprefix(repo, prefix):
448 448 if (prefix.startswith('x') and
449 449 repo.ui.configbool('experimental', 'revisions.prefixhexnode')):
450 450 prefix = prefix[1:]
451 451 try:
452 452 # Uses unfiltered repo because it's faster when prefix is ambiguous.
453 453 # This matches the shortesthexnodeidprefix() function below.
454 454 node = repo.unfiltered().changelog._partialmatch(prefix)
455 455 except error.AmbiguousPrefixLookupError:
456 456 revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
457 457 if revset:
458 458 # Clear config to avoid infinite recursion
459 459 configoverrides = {('experimental',
460 460 'revisions.disambiguatewithin'): None}
461 461 with repo.ui.configoverride(configoverrides):
462 462 revs = repo.anyrevs([revset], user=True)
463 463 matches = []
464 464 for rev in revs:
465 465 node = repo.changelog.node(rev)
466 466 if hex(node).startswith(prefix):
467 467 matches.append(node)
468 468 if len(matches) == 1:
469 469 return matches[0]
470 470 raise
471 471 if node is None:
472 472 return
473 473 repo.changelog.rev(node) # make sure node isn't filtered
474 474 return node
475 475
476 476 def mayberevnum(repo, prefix):
477 477 """Checks if the given prefix may be mistaken for a revision number"""
478 478 try:
479 479 i = int(prefix)
480 480 # if we are a pure int, then starting with zero will not be
481 481 # confused as a rev; or, obviously, if the int is larger
482 482 # than the value of the tip rev. We still need to disambiguate if
483 483 # prefix == '0', since that *is* a valid revnum.
484 484 if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
485 485 return False
486 486 return True
487 487 except ValueError:
488 488 return False
489 489
490 490 def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
491 491 """Find the shortest unambiguous prefix that matches hexnode.
492 492
493 493 If "cache" is not None, it must be a dictionary that can be used for
494 494 caching between calls to this method.
495 495 """
496 496 # _partialmatch() of filtered changelog could take O(len(repo)) time,
497 497 # which would be unacceptably slow. So we look for hash collisions in
498 498 # unfiltered space, which means some hashes may be slightly longer.
499 499
500 500 minlength = max(minlength, 1)
501 501
502 502 def disambiguate(prefix):
503 503 """Disambiguate against revnums."""
504 504 if repo.ui.configbool('experimental', 'revisions.prefixhexnode'):
505 505 if mayberevnum(repo, prefix):
506 506 return 'x' + prefix
507 507 else:
508 508 return prefix
509 509
510 510 hexnode = hex(node)
511 511 for length in range(len(prefix), len(hexnode) + 1):
512 512 prefix = hexnode[:length]
513 513 if not mayberevnum(repo, prefix):
514 514 return prefix
515 515
516 516 cl = repo.unfiltered().changelog
517 517 revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
518 518 if revset:
519 519 revs = None
520 520 if cache is not None:
521 521 revs = cache.get('disambiguationrevset')
522 522 if revs is None:
523 523 revs = repo.anyrevs([revset], user=True)
524 524 if cache is not None:
525 525 cache['disambiguationrevset'] = revs
526 526 if cl.rev(node) in revs:
527 527 hexnode = hex(node)
528 528 nodetree = None
529 529 if cache is not None:
530 530 nodetree = cache.get('disambiguationnodetree')
531 531 if not nodetree:
532 532 try:
533 533 nodetree = parsers.nodetree(cl.index, len(revs))
534 534 except AttributeError:
535 535 # no native nodetree
536 536 pass
537 537 else:
538 538 for r in revs:
539 539 nodetree.insert(r)
540 540 if cache is not None:
541 541 cache['disambiguationnodetree'] = nodetree
542 542 if nodetree is not None:
543 543 length = max(nodetree.shortest(node), minlength)
544 544 prefix = hexnode[:length]
545 545 return disambiguate(prefix)
546 546 for length in range(minlength, len(hexnode) + 1):
547 547 matches = []
548 548 prefix = hexnode[:length]
549 549 for rev in revs:
550 550 otherhexnode = repo[rev].hex()
551 551 if prefix == otherhexnode[:length]:
552 552 matches.append(otherhexnode)
553 553 if len(matches) == 1:
554 554 return disambiguate(prefix)
555 555
556 556 try:
557 557 return disambiguate(cl.shortest(node, minlength))
558 558 except error.LookupError:
559 559 raise error.RepoLookupError()
560 560
561 561 def isrevsymbol(repo, symbol):
562 562 """Checks if a symbol exists in the repo.
563 563
564 564 See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
565 565 symbol is an ambiguous nodeid prefix.
566 566 """
567 567 try:
568 568 revsymbol(repo, symbol)
569 569 return True
570 570 except error.RepoLookupError:
571 571 return False
572 572
573 573 def revsymbol(repo, symbol):
574 574 """Returns a context given a single revision symbol (as string).
575 575
576 576 This is similar to revsingle(), but accepts only a single revision symbol,
577 577 i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
578 578 not "max(public())".
579 579 """
580 580 if not isinstance(symbol, bytes):
581 581 msg = ("symbol (%s of type %s) was not a string, did you mean "
582 582 "repo[symbol]?" % (symbol, type(symbol)))
583 583 raise error.ProgrammingError(msg)
584 584 try:
585 585 if symbol in ('.', 'tip', 'null'):
586 586 return repo[symbol]
587 587
588 588 try:
589 589 r = int(symbol)
590 590 if '%d' % r != symbol:
591 591 raise ValueError
592 592 l = len(repo.changelog)
593 593 if r < 0:
594 594 r += l
595 595 if r < 0 or r >= l and r != wdirrev:
596 596 raise ValueError
597 597 return repo[r]
598 598 except error.FilteredIndexError:
599 599 raise
600 600 except (ValueError, OverflowError, IndexError):
601 601 pass
602 602
603 603 if len(symbol) == 40:
604 604 try:
605 605 node = bin(symbol)
606 606 rev = repo.changelog.rev(node)
607 607 return repo[rev]
608 608 except error.FilteredLookupError:
609 609 raise
610 610 except (TypeError, LookupError):
611 611 pass
612 612
613 613 # look up bookmarks through the name interface
614 614 try:
615 615 node = repo.names.singlenode(repo, symbol)
616 616 rev = repo.changelog.rev(node)
617 617 return repo[rev]
618 618 except KeyError:
619 619 pass
620 620
621 621 node = resolvehexnodeidprefix(repo, symbol)
622 622 if node is not None:
623 623 rev = repo.changelog.rev(node)
624 624 return repo[rev]
625 625
626 626 raise error.RepoLookupError(_("unknown revision '%s'") % symbol)
627 627
628 628 except error.WdirUnsupported:
629 629 return repo[None]
630 630 except (error.FilteredIndexError, error.FilteredLookupError,
631 631 error.FilteredRepoLookupError):
632 632 raise _filterederror(repo, symbol)
633 633
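A lookup sketch, assuming a loaded repo; only single symbols resolve here, so revset expressions still need revsingle()/revrange():

    ctx = revsymbol(repo, 'tip')              # also '.', 'null', revnums, hashes
    if isrevsymbol(repo, 'my-bookmark'):      # hypothetical bookmark name
        ctx = revsymbol(repo, 'my-bookmark')
    # revset expressions like 'max(public())' are not parsed by this function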
634 634 def _filterederror(repo, changeid):
635 635 """build an exception to be raised about a filtered changeid
636 636
637 637 This is extracted into a function to help extensions (e.g. evolve)
638 638 experiment with various message variants."""
639 639 if repo.filtername.startswith('visible'):
640 640
641 641 # Check if the changeset is obsolete
642 642 unfilteredrepo = repo.unfiltered()
643 643 ctx = revsymbol(unfilteredrepo, changeid)
644 644
645 645 # If the changeset is obsolete, enrich the message with the reason
646 646 # that made this changeset not visible
647 647 if ctx.obsolete():
648 648 msg = obsutil._getfilteredreason(repo, changeid, ctx)
649 649 else:
650 650 msg = _("hidden revision '%s'") % changeid
651 651
652 652 hint = _('use --hidden to access hidden revisions')
653 653
654 654 return error.FilteredRepoLookupError(msg, hint=hint)
655 655 msg = _("filtered revision '%s' (not in '%s' subset)")
656 656 msg %= (changeid, repo.filtername)
657 657 return error.FilteredRepoLookupError(msg)
658 658
659 659 def revsingle(repo, revspec, default='.', localalias=None):
660 660 if not revspec and revspec != 0:
661 661 return repo[default]
662 662
663 663 l = revrange(repo, [revspec], localalias=localalias)
664 664 if not l:
665 665 raise error.Abort(_('empty revision set'))
666 666 return repo[l.last()]
667 667
668 668 def _pairspec(revspec):
669 669 tree = revsetlang.parse(revspec)
670 670 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
671 671
672 672 def revpair(repo, revs):
673 673 if not revs:
674 674 return repo['.'], repo[None]
675 675
676 676 l = revrange(repo, revs)
677 677
678 678 if not l:
679 679 first = second = None
680 680 elif l.isascending():
681 681 first = l.min()
682 682 second = l.max()
683 683 elif l.isdescending():
684 684 first = l.max()
685 685 second = l.min()
686 686 else:
687 687 first = l.first()
688 688 second = l.last()
689 689
690 690 if first is None:
691 691 raise error.Abort(_('empty revision range'))
692 692 if (first == second and len(revs) >= 2
693 693 and not all(revrange(repo, [r]) for r in revs)):
694 694 raise error.Abort(_('empty revision on one side of range'))
695 695
696 696 # if top-level is range expression, the result must always be a pair
697 697 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
698 698 return repo[first], repo[None]
699 699
700 700 return repo[first], repo[second]
701 701
702 702 def revrange(repo, specs, localalias=None):
703 703 """Execute 1 to many revsets and return the union.
704 704
705 705 This is the preferred mechanism for executing revsets using user-specified
706 706 config options, such as revset aliases.
707 707
708 708 The revsets specified by ``specs`` will be executed via a chained ``OR``
709 709 expression. If ``specs`` is empty, an empty result is returned.
710 710
711 711 ``specs`` can contain integers, in which case they are assumed to be
712 712 revision numbers.
713 713
714 714 It is assumed the revsets are already formatted. If you have arguments
715 715 that need to be expanded in the revset, call ``revsetlang.formatspec()``
716 716 and pass the result as an element of ``specs``.
717 717
718 718 Specifying a single revset is allowed.
719 719
720 720 Returns a ``revset.abstractsmartset`` which is a list-like interface over
721 721 integer revisions.
722 722 """
723 723 allspecs = []
724 724 for spec in specs:
725 725 if isinstance(spec, int):
726 726 spec = revsetlang.formatspec('rev(%d)', spec)
727 727 allspecs.append(spec)
728 728 return repo.anyrevs(allspecs, user=True, localalias=localalias)
729 729
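A usage sketch, assuming a loaded repo; arguments are pre-expanded with revsetlang.formatspec() rather than interpolated into the spec:

    revs = revrange(repo, ['draft()', 'head()'])          # chained-OR union
    spec = revsetlang.formatspec('branch(%s)', 'default')
    revs = revrange(repo, [spec])
    if revs:
        tip = revs.max()   # abstractsmartset of integer revisions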
730 730 def meaningfulparents(repo, ctx):
731 731 """Return list of meaningful (or all if debug) parentrevs for rev.
732 732
733 733 For merges (two non-nullrev revisions) both parents are meaningful.
734 734 Otherwise the first parent revision is considered meaningful if it
735 735 is not the preceding revision.
736 736 """
737 737 parents = ctx.parents()
738 738 if len(parents) > 1:
739 739 return parents
740 740 if repo.ui.debugflag:
741 741 return [parents[0], repo[nullrev]]
742 742 if parents[0].rev() >= intrev(ctx) - 1:
743 743 return []
744 744 return parents
745 745
746 746 def expandpats(pats):
747 747 '''Expand bare globs when running on windows.
748 748 On posix we assume it has already been done by sh.'''
749 749 if not util.expandglobs:
750 750 return list(pats)
751 751 ret = []
752 752 for kindpat in pats:
753 753 kind, pat = matchmod._patsplit(kindpat, None)
754 754 if kind is None:
755 755 try:
756 756 globbed = glob.glob(pat)
757 757 except re.error:
758 758 globbed = [pat]
759 759 if globbed:
760 760 ret.extend(globbed)
761 761 continue
762 762 ret.append(kindpat)
763 763 return ret
764 764
765 765 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
766 766 badfn=None):
767 767 '''Return a matcher and the patterns that were used.
768 768 The matcher will warn about bad matches, unless an alternate badfn callback
769 769 is provided.'''
770 770 if pats == ("",):
771 771 pats = []
772 772 if opts is None:
773 773 opts = {}
774 774 if not globbed and default == 'relpath':
775 775 pats = expandpats(pats or [])
776 776
777 777 def bad(f, msg):
778 778 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
779 779
780 780 if badfn is None:
781 781 badfn = bad
782 782
783 783 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
784 784 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
785 785
786 786 if m.always():
787 787 pats = []
788 788 return m, pats
789 789
790 790 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
791 791 badfn=None):
792 792 '''Return a matcher that will warn about bad matches.'''
793 793 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
794 794
795 795 def matchall(repo):
796 796 '''Return a matcher that will efficiently match everything.'''
797 797 return matchmod.always(repo.root, repo.getcwd())
798 798
799 799 def matchfiles(repo, files, badfn=None):
800 800 '''Return a matcher that will efficiently match exactly these files.'''
801 801 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
802 802
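A sketch of the matcher helpers, assuming a loaded repo; a matcher is a callable predicate over repo-relative paths:

    m = matchfiles(repo, ['README', 'setup.py'])   # hypothetical files
    m('README')        # -> True; anything else -> False
    m = matchall(repo) # matches every path, cheaply
    m = match(repo[None], pats=['glob:*.py'])      # warns on bad matches by default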
803 803 def parsefollowlinespattern(repo, rev, pat, msg):
804 804 """Return a file name from `pat` pattern suitable for usage in followlines
805 805 logic.
806 806 """
807 807 if not matchmod.patkind(pat):
808 808 return pathutil.canonpath(repo.root, repo.getcwd(), pat)
809 809 else:
810 810 ctx = repo[rev]
811 811 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
812 812 files = [f for f in ctx if m(f)]
813 813 if len(files) != 1:
814 814 raise error.ParseError(msg)
815 815 return files[0]
816 816
817 817 def getorigvfs(ui, repo):
818 818 """return a vfs suitable to save 'orig' file
819 819
820 820 return None if no special directory is configured"""
821 821 origbackuppath = ui.config('ui', 'origbackuppath')
822 822 if not origbackuppath:
823 823 return None
824 824 return vfs.vfs(repo.wvfs.join(origbackuppath))
825 825
826 826 def origpath(ui, repo, filepath):
827 827 '''customize where .orig files are created
828 828
829 829 Fetch user defined path from config file: [ui] origbackuppath = <path>
830 830 Fall back to default (filepath with .orig suffix) if not specified
831 831 '''
832 832 origvfs = getorigvfs(ui, repo)
833 833 if origvfs is None:
834 834 return filepath + ".orig"
835 835
836 836 # Convert filepath from an absolute path into a path inside the repo.
837 837 filepathfromroot = util.normpath(os.path.relpath(filepath,
838 838 start=repo.root))
839 839
840 840 origbackupdir = origvfs.dirname(filepathfromroot)
841 841 if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
842 842 ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))
843 843
844 844 # Remove any files that conflict with the backup file's path
845 845 for f in reversed(list(util.finddirs(filepathfromroot))):
846 846 if origvfs.isfileorlink(f):
847 847 ui.note(_('removing conflicting file: %s\n')
848 848 % origvfs.join(f))
849 849 origvfs.unlink(f)
850 850 break
851 851
852 852 origvfs.makedirs(origbackupdir)
853 853
854 854 if origvfs.isdir(filepathfromroot) and not origvfs.islink(filepathfromroot):
855 855 ui.note(_('removing conflicting directory: %s\n')
856 856 % origvfs.join(filepathfromroot))
857 857 origvfs.rmtree(filepathfromroot, forcibly=True)
858 858
859 859 return origvfs.join(filepathfromroot)
860 860
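A behavior sketch, assuming a loaded repo; with no origbackuppath configured the result is just the '.orig' sibling:

    backup = origpath(ui, repo, '/repo/a.txt')     # -> '/repo/a.txt.orig'
    # with '[ui] origbackuppath = .hg/origbackups' the same call returns a path
    # under that directory, removing any conflicting files or dirs first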
861 861 class _containsnode(object):
862 862 """proxy __contains__(node) to container.__contains__ which accepts revs"""
863 863
864 864 def __init__(self, repo, revcontainer):
865 865 self._torev = repo.changelog.rev
866 866 self._revcontains = revcontainer.__contains__
867 867
868 868 def __contains__(self, node):
869 869 return self._revcontains(self._torev(node))
870 870
871 871 def cleanupnodes(repo, replacements, operation, moves=None, metadata=None,
872 872 fixphase=False, targetphase=None, backup=True):
873 873 """do common cleanups when old nodes are replaced by new nodes
874 874
875 875 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
876 876 (we might also want to move working directory parent in the future)
877 877
878 878 By default, bookmark moves are calculated automatically from 'replacements',
879 879 but 'moves' can be used to override that. Also, 'moves' may include
880 880 additional bookmark moves that should not have associated obsmarkers.
881 881
882 882 replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
883 883 have replacements. operation is a string, like "rebase".
884 884
885 885 metadata is a dictionary containing metadata to be stored in the obsmarker if
886 886 obsolescence is enabled.
887 887 """
888 888 assert fixphase or targetphase is None
889 889 if not replacements and not moves:
890 890 return
891 891
892 892 # translate mapping's other forms
893 893 if not util.safehasattr(replacements, 'items'):
894 894 replacements = {(n,): () for n in replacements}
895 895 else:
896 896 # upgrading non-tuple "source" keys to tuple ones for BC
897 897 repls = {}
898 898 for key, value in replacements.items():
899 899 if not isinstance(key, tuple):
900 900 key = (key,)
901 901 repls[key] = value
902 902 replacements = repls
903 903
904 # Unfiltered repo is needed since nodes in replacements might be hidden.
905 unfi = repo.unfiltered()
906
904 907 # Calculate bookmark movements
905 908 if moves is None:
906 909 moves = {}
907 # Unfiltered repo is needed since nodes in replacements might be hidden.
908 unfi = repo.unfiltered()
909 910 for oldnodes, newnodes in replacements.items():
910 911 for oldnode in oldnodes:
911 912 if oldnode in moves:
912 913 continue
913 914 if len(newnodes) > 1:
914 915 # usually a split, take the one with biggest rev number
915 916 newnode = next(unfi.set('max(%ln)', newnodes)).node()
916 917 elif len(newnodes) == 0:
917 918 # move bookmark backwards
918 919 allreplaced = []
919 920 for rep in replacements:
920 921 allreplaced.extend(rep)
921 922 roots = list(unfi.set('max((::%n) - %ln)', oldnode,
922 923 allreplaced))
923 924 if roots:
924 925 newnode = roots[0].node()
925 926 else:
926 927 newnode = nullid
927 928 else:
928 929 newnode = newnodes[0]
929 930 moves[oldnode] = newnode
930 931
931 932 allnewnodes = [n for ns in replacements.values() for n in ns]
932 933 toretract = {}
933 934 toadvance = {}
934 935 if fixphase:
935 936 precursors = {}
936 937 for oldnodes, newnodes in replacements.items():
937 938 for oldnode in oldnodes:
938 939 for newnode in newnodes:
939 940 precursors.setdefault(newnode, []).append(oldnode)
940 941
941 942 allnewnodes.sort(key=lambda n: unfi[n].rev())
942 943 newphases = {}
943 944 def phase(ctx):
944 945 return newphases.get(ctx.node(), ctx.phase())
945 946 for newnode in allnewnodes:
946 947 ctx = unfi[newnode]
947 948 parentphase = max(phase(p) for p in ctx.parents())
948 949 if targetphase is None:
949 950 oldphase = max(unfi[oldnode].phase()
950 951 for oldnode in precursors[newnode])
951 952 newphase = max(oldphase, parentphase)
952 953 else:
953 954 newphase = max(targetphase, parentphase)
954 955 newphases[newnode] = newphase
955 956 if newphase > ctx.phase():
956 957 toretract.setdefault(newphase, []).append(newnode)
957 958 elif newphase < ctx.phase():
958 959 toadvance.setdefault(newphase, []).append(newnode)
959 960
960 961 with repo.transaction('cleanup') as tr:
961 962 # Move bookmarks
962 963 bmarks = repo._bookmarks
963 964 bmarkchanges = []
964 965 for oldnode, newnode in moves.items():
965 966 oldbmarks = repo.nodebookmarks(oldnode)
966 967 if not oldbmarks:
967 968 continue
968 969 from . import bookmarks # avoid import cycle
969 970 repo.ui.debug('moving bookmarks %r from %s to %s\n' %
970 971 (pycompat.rapply(pycompat.maybebytestr, oldbmarks),
971 972 hex(oldnode), hex(newnode)))
972 973 # Delete divergent bookmarks being parents of related newnodes
973 974 deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
974 975 allnewnodes, newnode, oldnode)
975 976 deletenodes = _containsnode(repo, deleterevs)
976 977 for name in oldbmarks:
977 978 bmarkchanges.append((name, newnode))
978 979 for b in bookmarks.divergent2delete(repo, deletenodes, name):
979 980 bmarkchanges.append((b, None))
980 981
981 982 if bmarkchanges:
982 983 bmarks.applychanges(repo, tr, bmarkchanges)
983 984
984 985 for phase, nodes in toretract.items():
985 986 phases.retractboundary(repo, tr, phase, nodes)
986 987 for phase, nodes in toadvance.items():
987 988 phases.advanceboundary(repo, tr, phase, nodes)
988 989
989 990 # Obsolete or strip nodes
990 991 if obsolete.isenabled(repo, obsolete.createmarkersopt):
991 992 # If a node is already obsoleted, and we want to obsolete it
992 993 # without a successor, skip that obsolete request since it's
993 994 # unnecessary. That's the "if s or not isobs(n)" check below.
994 995 # Also sort the nodes in topological order; that might be useful for
995 996 # some obsstore logic.
996 997 # NOTE: the sorting might belong to createmarkers.
997 998 torev = unfi.changelog.rev
998 999 sortfunc = lambda ns: torev(ns[0][0])
999 1000 rels = []
1000 1001 for ns, s in sorted(replacements.items(), key=sortfunc):
1001 1002 rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
1002 1003 rels.append(rel)
1003 1004 if rels:
1004 1005 obsolete.createmarkers(repo, rels, operation=operation,
1005 1006 metadata=metadata)
1006 1007 else:
1007 1008 from . import repair # avoid import cycle
1008 1009 tostrip = list(n for ns in replacements for n in ns)
1009 1010 if tostrip:
1010 1011 repair.delayedstrip(repo.ui, repo, tostrip, operation,
1011 1012 backup=backup)
1012 1013
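A hedged calling sketch for the function this change touches, assuming a loaded repo and held locks (the nodes are hypothetical):

    replacements = {oldnode: [newnode]}   # or {oldnode: []} to prune outright
    with repo.wlock(), repo.lock():
        cleanupnodes(repo, replacements, 'myoperation')
    # passing moves={oldnode: newnode} overrides the automatic bookmark
    # calculation; after this change a non-None 'moves' is trusted as given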
1013 1014 def addremove(repo, matcher, prefix, opts=None):
1014 1015 if opts is None:
1015 1016 opts = {}
1016 1017 m = matcher
1017 1018 dry_run = opts.get('dry_run')
1018 1019 try:
1019 1020 similarity = float(opts.get('similarity') or 0)
1020 1021 except ValueError:
1021 1022 raise error.Abort(_('similarity must be a number'))
1022 1023 if similarity < 0 or similarity > 100:
1023 1024 raise error.Abort(_('similarity must be between 0 and 100'))
1024 1025 similarity /= 100.0
1025 1026
1026 1027 ret = 0
1027 1028 join = lambda f: os.path.join(prefix, f)
1028 1029
1029 1030 wctx = repo[None]
1030 1031 for subpath in sorted(wctx.substate):
1031 1032 submatch = matchmod.subdirmatcher(subpath, m)
1032 1033 if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
1033 1034 sub = wctx.sub(subpath)
1034 1035 try:
1035 1036 if sub.addremove(submatch, prefix, opts):
1036 1037 ret = 1
1037 1038 except error.LookupError:
1038 1039 repo.ui.status(_("skipping missing subrepository: %s\n")
1039 1040 % join(subpath))
1040 1041
1041 1042 rejected = []
1042 1043 def badfn(f, msg):
1043 1044 if f in m.files():
1044 1045 m.bad(f, msg)
1045 1046 rejected.append(f)
1046 1047
1047 1048 badmatch = matchmod.badmatch(m, badfn)
1048 1049 added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
1049 1050 badmatch)
1050 1051
1051 1052 unknownset = set(unknown + forgotten)
1052 1053 toprint = unknownset.copy()
1053 1054 toprint.update(deleted)
1054 1055 for abs in sorted(toprint):
1055 1056 if repo.ui.verbose or not m.exact(abs):
1056 1057 if abs in unknownset:
1057 1058 status = _('adding %s\n') % m.uipath(abs)
1058 1059 label = 'ui.addremove.added'
1059 1060 else:
1060 1061 status = _('removing %s\n') % m.uipath(abs)
1061 1062 label = 'ui.addremove.removed'
1062 1063 repo.ui.status(status, label=label)
1063 1064
1064 1065 renames = _findrenames(repo, m, added + unknown, removed + deleted,
1065 1066 similarity)
1066 1067
1067 1068 if not dry_run:
1068 1069 _markchanges(repo, unknown + forgotten, deleted, renames)
1069 1070
1070 1071 for f in rejected:
1071 1072 if f in m.files():
1072 1073 return 1
1073 1074 return ret
1074 1075
1075 1076 def marktouched(repo, files, similarity=0.0):
1076 1077 '''Assert that files have somehow been operated upon. files are relative to
1077 1078 the repo root.'''
1078 1079 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
1079 1080 rejected = []
1080 1081
1081 1082 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
1082 1083
1083 1084 if repo.ui.verbose:
1084 1085 unknownset = set(unknown + forgotten)
1085 1086 toprint = unknownset.copy()
1086 1087 toprint.update(deleted)
1087 1088 for abs in sorted(toprint):
1088 1089 if abs in unknownset:
1089 1090 status = _('adding %s\n') % abs
1090 1091 else:
1091 1092 status = _('removing %s\n') % abs
1092 1093 repo.ui.status(status)
1093 1094
1094 1095 renames = _findrenames(repo, m, added + unknown, removed + deleted,
1095 1096 similarity)
1096 1097
1097 1098 _markchanges(repo, unknown + forgotten, deleted, renames)
1098 1099
1099 1100 for f in rejected:
1100 1101 if f in m.files():
1101 1102 return 1
1102 1103 return 0
1103 1104
1104 1105 def _interestingfiles(repo, matcher):
1105 1106 '''Walk dirstate with matcher, looking for files that addremove would care
1106 1107 about.
1107 1108
1108 1109 This is different from dirstate.status because it doesn't care about
1109 1110 whether files are modified or clean.'''
1110 1111 added, unknown, deleted, removed, forgotten = [], [], [], [], []
1111 1112 audit_path = pathutil.pathauditor(repo.root, cached=True)
1112 1113
1113 1114 ctx = repo[None]
1114 1115 dirstate = repo.dirstate
1115 1116 matcher = repo.narrowmatch(matcher, includeexact=True)
1116 1117 walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
1117 1118 unknown=True, ignored=False, full=False)
1118 1119 for abs, st in walkresults.iteritems():
1119 1120 dstate = dirstate[abs]
1120 1121 if dstate == '?' and audit_path.check(abs):
1121 1122 unknown.append(abs)
1122 1123 elif dstate != 'r' and not st:
1123 1124 deleted.append(abs)
1124 1125 elif dstate == 'r' and st:
1125 1126 forgotten.append(abs)
1126 1127 # for finding renames
1127 1128 elif dstate == 'r' and not st:
1128 1129 removed.append(abs)
1129 1130 elif dstate == 'a':
1130 1131 added.append(abs)
1131 1132
1132 1133 return added, unknown, deleted, removed, forgotten
1133 1134
1134 1135 def _findrenames(repo, matcher, added, removed, similarity):
1135 1136 '''Find renames from removed files to added ones.'''
1136 1137 renames = {}
1137 1138 if similarity > 0:
1138 1139 for old, new, score in similar.findrenames(repo, added, removed,
1139 1140 similarity):
1140 1141 if (repo.ui.verbose or not matcher.exact(old)
1141 1142 or not matcher.exact(new)):
1142 1143 repo.ui.status(_('recording removal of %s as rename to %s '
1143 1144 '(%d%% similar)\n') %
1144 1145 (matcher.rel(old), matcher.rel(new),
1145 1146 score * 100))
1146 1147 renames[new] = old
1147 1148 return renames
1148 1149
1149 1150 def _markchanges(repo, unknown, deleted, renames):
1150 1151 '''Marks the files in unknown as added, the files in deleted as removed,
1151 1152 and the files in renames as copied.'''
1152 1153 wctx = repo[None]
1153 1154 with repo.wlock():
1154 1155 wctx.forget(deleted)
1155 1156 wctx.add(unknown)
1156 1157 for new, old in renames.iteritems():
1157 1158 wctx.copy(old, new)
1158 1159
1159 1160 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1160 1161 """Update the dirstate to reflect the intent of copying src to dst. For
1161 1162 different reasons it might not end with dst being marked as copied from src.
1162 1163 """
1163 1164 origsrc = repo.dirstate.copied(src) or src
1164 1165 if dst == origsrc: # copying back a copy?
1165 1166 if repo.dirstate[dst] not in 'mn' and not dryrun:
1166 1167 repo.dirstate.normallookup(dst)
1167 1168 else:
1168 1169 if repo.dirstate[origsrc] == 'a' and origsrc == src:
1169 1170 if not ui.quiet:
1170 1171 ui.warn(_("%s has not been committed yet, so no copy "
1171 1172 "data will be stored for %s.\n")
1172 1173 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
1173 1174 if repo.dirstate[dst] in '?r' and not dryrun:
1174 1175 wctx.add([dst])
1175 1176 elif not dryrun:
1176 1177 wctx.copy(origsrc, dst)
1177 1178
1178 1179 def writerequires(opener, requirements):
1179 1180 with opener('requires', 'w', atomictemp=True) as fp:
1180 1181 for r in sorted(requirements):
1181 1182 fp.write("%s\n" % r)
1182 1183
1183 1184 class filecachesubentry(object):
1184 1185 def __init__(self, path, stat):
1185 1186 self.path = path
1186 1187 self.cachestat = None
1187 1188 self._cacheable = None
1188 1189
1189 1190 if stat:
1190 1191 self.cachestat = filecachesubentry.stat(self.path)
1191 1192
1192 1193 if self.cachestat:
1193 1194 self._cacheable = self.cachestat.cacheable()
1194 1195 else:
1195 1196 # None means we don't know yet
1196 1197 self._cacheable = None
1197 1198
1198 1199 def refresh(self):
1199 1200 if self.cacheable():
1200 1201 self.cachestat = filecachesubentry.stat(self.path)
1201 1202
1202 1203 def cacheable(self):
1203 1204 if self._cacheable is not None:
1204 1205 return self._cacheable
1205 1206
1206 1207 # we don't know yet, assume it is for now
1207 1208 return True
1208 1209
1209 1210 def changed(self):
1210 1211 # no point in going further if we can't cache it
1211 1212 if not self.cacheable():
1212 1213 return True
1213 1214
1214 1215 newstat = filecachesubentry.stat(self.path)
1215 1216
1216 1217 # we may not know if it's cacheable yet, check again now
1217 1218 if newstat and self._cacheable is None:
1218 1219 self._cacheable = newstat.cacheable()
1219 1220
1220 1221 # check again
1221 1222 if not self._cacheable:
1222 1223 return True
1223 1224
1224 1225 if self.cachestat != newstat:
1225 1226 self.cachestat = newstat
1226 1227 return True
1227 1228 else:
1228 1229 return False
1229 1230
1230 1231 @staticmethod
1231 1232 def stat(path):
1232 1233 try:
1233 1234 return util.cachestat(path)
1234 1235 except OSError as e:
1235 1236 if e.errno != errno.ENOENT:
1236 1237 raise
1237 1238
1238 1239 class filecacheentry(object):
1239 1240 def __init__(self, paths, stat=True):
1240 1241 self._entries = []
1241 1242 for path in paths:
1242 1243 self._entries.append(filecachesubentry(path, stat))
1243 1244
1244 1245 def changed(self):
1245 1246 '''true if any entry has changed'''
1246 1247 for entry in self._entries:
1247 1248 if entry.changed():
1248 1249 return True
1249 1250 return False
1250 1251
1251 1252 def refresh(self):
1252 1253 for entry in self._entries:
1253 1254 entry.refresh()
1254 1255
1255 1256 class filecache(object):
1256 1257 """A property like decorator that tracks files under .hg/ for updates.
1257 1258
1258 1259 On first access, the files defined as arguments are stat()ed and the
1259 1260 results cached. The decorated function is called. The results are stashed
1260 1261 away in a ``_filecache`` dict on the object whose method is decorated.
1261 1262
1262 1263 On subsequent access, the cached result is used as it is set to the
1263 1264 instance dictionary.
1264 1265
1265 1266 On external property set/delete operations, the caller must update the
1266 1267 corresponding _filecache entry appropriately. Use __class__.<attr>.set()
1267 1268 instead of directly setting <attr>.
1268 1269
1269 1270 When using the property API, the cached data is always used if available.
1270 1271 No stat() is performed to check if the file has changed.
1271 1272
1272 1273 Others can muck about with the state of the ``_filecache`` dict. e.g. they
1273 1274 can populate an entry before the property's getter is called. In this case,
1274 1275 entries in ``_filecache`` will be used during property operations,
1275 1276 if available. If the underlying file changes, it is up to external callers
1276 1277 to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
1277 1278 method result as well as possibly calling ``del obj._filecache[attr]`` to
1278 1279 remove the ``filecacheentry``.
1279 1280 """
1280 1281
1281 1282 def __init__(self, *paths):
1282 1283 self.paths = paths
1283 1284
1284 1285 def join(self, obj, fname):
1285 1286 """Used to compute the runtime path of a cached file.
1286 1287
1287 1288 Users should subclass filecache and provide their own version of this
1288 1289 function to call the appropriate join function on 'obj' (an instance
1289 1290 of the class that its member function was decorated).
1290 1291 """
1291 1292 raise NotImplementedError
1292 1293
1293 1294 def __call__(self, func):
1294 1295 self.func = func
1295 1296 self.sname = func.__name__
1296 1297 self.name = pycompat.sysbytes(self.sname)
1297 1298 return self
1298 1299
1299 1300 def __get__(self, obj, type=None):
1300 1301 # if accessed on the class, return the descriptor itself.
1301 1302 if obj is None:
1302 1303 return self
1303 1304
1304 1305 assert self.sname not in obj.__dict__
1305 1306
1306 1307 entry = obj._filecache.get(self.name)
1307 1308
1308 1309 if entry:
1309 1310 if entry.changed():
1310 1311 entry.obj = self.func(obj)
1311 1312 else:
1312 1313 paths = [self.join(obj, path) for path in self.paths]
1313 1314
1314 1315 # We stat -before- creating the object so our cache doesn't lie if
1315 1316 # a writer modified the file between the time we read and stat it
1316 1317 entry = filecacheentry(paths, True)
1317 1318 entry.obj = self.func(obj)
1318 1319
1319 1320 obj._filecache[self.name] = entry
1320 1321
1321 1322 obj.__dict__[self.sname] = entry.obj
1322 1323 return entry.obj
1323 1324
1324 1325 # don't implement __set__(), which would make __dict__ lookup as slow as
1325 1326 # function call.
1326 1327
1327 1328 def set(self, obj, value):
1328 1329 if self.name not in obj._filecache:
1329 1330 # we add an entry for the missing value because X in __dict__
1330 1331 # implies X in _filecache
1331 1332 paths = [self.join(obj, path) for path in self.paths]
1332 1333 ce = filecacheentry(paths, False)
1333 1334 obj._filecache[self.name] = ce
1334 1335 else:
1335 1336 ce = obj._filecache[self.name]
1336 1337
1337 1338 ce.obj = value # update cached copy
1338 1339 obj.__dict__[self.sname] = value # update copy returned by obj.x
1339 1340
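A minimal subclass sketch mirroring the way localrepo wires this up; the decorated object must carry a _filecache dict:

    class repofilecache(filecache):
        def join(self, obj, fname):
            return obj.vfs.join(fname)   # resolve names against .hg/

    class cacheduser(object):            # hypothetical consumer
        def __init__(self, vfs):
            self.vfs = vfs
            self._filecache = {}
        @repofilecache('bookmarks')
        def bookmarks(self):
            return self.vfs.tryread('bookmarks')   # recomputed when the stat changes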
1340 1341 def extdatasource(repo, source):
1341 1342 """Gather a map of rev -> value dict from the specified source
1342 1343
1343 1344 A source spec is treated as a URL, with a special case shell: type
1344 1345 for parsing the output from a shell command.
1345 1346
1346 1347 The data is parsed as a series of newline-separated records where
1347 1348 each record is a revision specifier optionally followed by a space
1348 1349 and a freeform string value. If the revision is known locally, it
1349 1350 is converted to a rev, otherwise the record is skipped.
1350 1351
1351 1352 Note that both key and value are treated as UTF-8 and converted to
1352 1353 the local encoding. This allows uniformity between local and
1353 1354 remote data sources.
1354 1355 """
1355 1356
1356 1357 spec = repo.ui.config("extdata", source)
1357 1358 if not spec:
1358 1359 raise error.Abort(_("unknown extdata source '%s'") % source)
1359 1360
1360 1361 data = {}
1361 1362 src = proc = None
1362 1363 try:
1363 1364 if spec.startswith("shell:"):
1364 1365 # external commands should be run relative to the repo root
1365 1366 cmd = spec[6:]
1366 1367 proc = subprocess.Popen(procutil.tonativestr(cmd),
1367 1368 shell=True, bufsize=-1,
1368 1369 close_fds=procutil.closefds,
1369 1370 stdout=subprocess.PIPE,
1370 1371 cwd=procutil.tonativestr(repo.root))
1371 1372 src = proc.stdout
1372 1373 else:
1373 1374 # treat as a URL or file
1374 1375 src = url.open(repo.ui, spec)
1375 1376 for l in src:
1376 1377 if " " in l:
1377 1378 k, v = l.strip().split(" ", 1)
1378 1379 else:
1379 1380 k, v = l.strip(), ""
1380 1381
1381 1382 k = encoding.tolocal(k)
1382 1383 try:
1383 1384 data[revsingle(repo, k).rev()] = encoding.tolocal(v)
1384 1385 except (error.LookupError, error.RepoLookupError):
1385 1386 pass # we ignore data for nodes that don't exist locally
1386 1387 finally:
1387 1388 if proc:
1388 1389 proc.communicate()
1389 1390 if src:
1390 1391 src.close()
1391 1392 if proc and proc.returncode != 0:
1392 1393 raise error.Abort(_("extdata command '%s' failed: %s")
1393 1394 % (cmd, procutil.explainexit(proc.returncode)))
1394 1395
1395 1396 return data
1396 1397
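A config-plus-usage sketch; the shell: command runs relative to the repo root (the source name and file are hypothetical):

    # hgrc:
    #   [extdata]
    #   bugrefs = shell:cat .hg/bugmap
    # .hg/bugmap lines look like: '3de5eca88c00 issue1234'
    data = extdatasource(repo, 'bugrefs')   # -> {rev: 'issue1234', ...}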
1397 1398 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1398 1399 if lock is None:
1399 1400 raise error.LockInheritanceContractViolation(
1400 1401 'lock can only be inherited while held')
1401 1402 if environ is None:
1402 1403 environ = {}
1403 1404 with lock.inherit() as locker:
1404 1405 environ[envvar] = locker
1405 1406 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1406 1407
1407 1408 def wlocksub(repo, cmd, *args, **kwargs):
1408 1409 """run cmd as a subprocess that allows inheriting repo's wlock
1409 1410
1410 1411 This can only be called while the wlock is held. This takes all the
1411 1412 arguments that ui.system does, and returns the exit code of the
1412 1413 subprocess."""
1413 1414 return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
1414 1415 **kwargs)
1415 1416
1416 1417 class progress(object):
1417 1418 def __init__(self, ui, topic, unit="", total=None):
1418 1419 self.ui = ui
1419 1420 self.pos = 0
1420 1421 self.topic = topic
1421 1422 self.unit = unit
1422 1423 self.total = total
1423 1424
1424 1425 def __enter__(self):
1425 1426 return self
1426 1427
1427 1428 def __exit__(self, exc_type, exc_value, exc_tb):
1428 1429 self.complete()
1429 1430
1430 1431 def update(self, pos, item="", total=None):
1431 1432 assert pos is not None
1432 1433 if total:
1433 1434 self.total = total
1434 1435 self.pos = pos
1435 1436 self._print(item)
1436 1437
1437 1438 def increment(self, step=1, item="", total=None):
1438 1439 self.update(self.pos + step, item, total)
1439 1440
1440 1441 def complete(self):
1441 1442 self.ui.progress(self.topic, None)
1442 1443
1443 1444 def _print(self, item):
1444 1445 self.ui.progress(self.topic, self.pos, item, self.unit,
1445 1446 self.total)
1446 1447
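A usage sketch; the context-manager form guarantees the progress bar is cleared even on error (topic and items are hypothetical):

    with progress(repo.ui, 'scanning', unit='files', total=len(files)) as p:
        for f in files:
            p.increment(item=f)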
1447 1448 def gdinitconfig(ui):
1448 1449 """helper function to know if a repo should be created as general delta
1449 1450 """
1450 1451 # experimental config: format.generaldelta
1451 1452 return (ui.configbool('format', 'generaldelta')
1452 1453 or ui.configbool('format', 'usegeneraldelta')
1453 1454 or ui.configbool('format', 'sparse-revlog'))
1454 1455
1455 1456 def gddeltaconfig(ui):
1456 1457 """helper function to know if incoming delta should be optimised
1457 1458 """
1458 1459 # experimental config: format.generaldelta
1459 1460 return ui.configbool('format', 'generaldelta')
1460 1461
1461 1462 class simplekeyvaluefile(object):
1462 1463 """A simple file with key=value lines
1463 1464
1464 1465 Keys must be alphanumeric and start with a letter, values must not
1465 1466 contain '\n' characters"""
1466 1467 firstlinekey = '__firstline'
1467 1468
1468 1469 def __init__(self, vfs, path, keys=None):
1469 1470 self.vfs = vfs
1470 1471 self.path = path
1471 1472
1472 1473 def read(self, firstlinenonkeyval=False):
1473 1474 """Read the contents of a simple key-value file
1474 1475
1475 1476 'firstlinenonkeyval' indicates whether the first line of file should
1476 1477 be treated as a key-value pair or returned fully under the
1477 1478 __firstline key."""
1478 1479 lines = self.vfs.readlines(self.path)
1479 1480 d = {}
1480 1481 if firstlinenonkeyval:
1481 1482 if not lines:
1482 1483 e = _("empty simplekeyvalue file")
1483 1484 raise error.CorruptedState(e)
1484 1485 # we don't want to include '\n' in the __firstline
1485 1486 d[self.firstlinekey] = lines[0][:-1]
1486 1487 del lines[0]
1487 1488
1488 1489 try:
1489 1490 # the 'if line.strip()' part prevents us from failing on empty
1490 1491 # lines which only contain '\n' and are therefore not skipped
1491 1492 # by 'if line'
1492 1493 updatedict = dict(line[:-1].split('=', 1) for line in lines
1493 1494 if line.strip())
1494 1495 if self.firstlinekey in updatedict:
1495 1496 e = _("%r can't be used as a key")
1496 1497 raise error.CorruptedState(e % self.firstlinekey)
1497 1498 d.update(updatedict)
1498 1499 except ValueError as e:
1499 1500 raise error.CorruptedState(str(e))
1500 1501 return d
1501 1502
1502 1503 def write(self, data, firstline=None):
1503 1504 """Write key=>value mapping to a file
1504 1505 data is a dict. Keys must be alphanumeric and start with a letter.
1505 1506 Values must not contain newline characters.
1506 1507
1507 1508 If 'firstline' is not None, it is written to file before
1508 1509 everything else, as it is, not in a key=value form"""
1509 1510 lines = []
1510 1511 if firstline is not None:
1511 1512 lines.append('%s\n' % firstline)
1512 1513
1513 1514 for k, v in data.items():
1514 1515 if k == self.firstlinekey:
1515 1516 e = "key name '%s' is reserved" % self.firstlinekey
1516 1517 raise error.ProgrammingError(e)
1517 1518 if not k[0:1].isalpha():
1518 1519 e = "keys must start with a letter in a key-value file"
1519 1520 raise error.ProgrammingError(e)
1520 1521 if not k.isalnum():
1521 1522 e = "invalid key name in a simple key-value file"
1522 1523 raise error.ProgrammingError(e)
1523 1524 if '\n' in v:
1524 1525 e = "invalid value in a simple key-value file"
1525 1526 raise error.ProgrammingError(e)
1526 1527 lines.append("%s=%s\n" % (k, v))
1527 1528 with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
1528 1529 fp.write(''.join(lines))
1529 1530
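A round-trip sketch, assuming a repo vfs; the optional first line is stored and returned verbatim:

    kv = simplekeyvaluefile(repo.vfs, 'mystate')   # hypothetical state file
    kv.write({'step': '3', 'node': 'deadbeef'}, firstline='version 1')
    kv.read(firstlinenonkeyval=True)
    # -> {'__firstline': 'version 1', 'step': '3', 'node': 'deadbeef'}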
_reportobsoletedsource = [
    'debugobsolete',
    'pull',
    'push',
    'serve',
    'unbundle',
]

_reportnewcssource = [
    'pull',
    'unbundle',
]

def prefetchfiles(repo, revs, match):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally, before the command
    uses them."""
    if match:
        # The command itself will complain about files that don't exist, so
        # don't duplicate the message.
        match = matchmod.badmatch(match, lambda fn, msg: None)
    else:
        match = matchall(repo)

    fileprefetchhooks(repo, revs, match)

# a list of (repo, revs, match) prefetch functions
fileprefetchhooks = util.hooks()

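# Illustrative sketch (not from the original source): an extension could
# register a prefetch function through util.hooks.add(); 'myext' and the
# function body are placeholders.
#
#     def _warmcache(repo, revs, match):
#         pass  # e.g. fetch the matched files for 'revs' into a local cache
#
#     fileprefetchhooks.add('myext', _warmcache)
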
# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True

def registersummarycallback(repo, otr, txnname=''):
    """register a callback to issue a summary after the transaction is closed
    """
    def txmatch(sources):
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used, leading to trouble. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())
        def wrapped(tr):
            repo = reporef()
            if filtername:
                repo = repo.filtered(filtername)
            func(repo, tr)
        newcat = '%02i-txnreport' % len(categories)
        otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    if txmatch(_reportobsoletedsource):
        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            if obsoleted:
                repo.ui.status(_('obsoleted %i changesets\n')
                               % len(obsoleted))

    if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
        repo.ui.configbool('experimental', 'evolution.report-instabilities')):
        instabilitytypes = [
            ('orphan', 'orphan'),
            ('phase-divergent', 'phasedivergent'),
            ('content-divergent', 'contentdivergent'),
        ]

        def getinstabilitycounts(repo):
            filtered = repo.changelog.filteredrevs
            counts = {}
            for instability, revset in instabilitytypes:
                counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
                                          filtered)
            return counts

        oldinstabilitycounts = getinstabilitycounts(repo)
        @reportsummary
        def reportnewinstabilities(repo, tr):
            newinstabilitycounts = getinstabilitycounts(repo)
            for instability, revset in instabilitytypes:
                delta = (newinstabilitycounts[instability] -
                         oldinstabilitycounts[instability])
                msg = getinstabilitymessage(delta, instability)
                if msg:
                    repo.ui.warn(msg)

    if txmatch(_reportnewcssource):
        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get('origrepolen', len(repo))
            unfi = repo.unfiltered()
            if origrepolen >= len(unfi):
                return

            # Compute the bounds of new visible revisions' range.
            revs = smartset.spanset(repo, start=origrepolen)
            if revs:
                minrev, maxrev = repo[revs.min()], repo[revs.max()]

                if minrev == maxrev:
                    revrange = minrev
                else:
                    revrange = '%s:%s' % (minrev, maxrev)
                draft = len(repo.revs('%ld and draft()', revs))
                secret = len(repo.revs('%ld and secret()', revs))
                if not (draft or secret):
                    msg = _('new changesets %s\n') % revrange
                elif draft and secret:
                    msg = _('new changesets %s (%d drafts, %d secrets)\n')
                    msg %= (revrange, draft, secret)
                elif draft:
                    msg = _('new changesets %s (%d drafts)\n')
                    msg %= (revrange, draft)
                elif secret:
                    msg = _('new changesets %s (%d secrets)\n')
                    msg %= (revrange, secret)
                else:
                    errormsg = 'entered unreachable condition'
                    raise error.ProgrammingError(errormsg)
                repo.ui.status(msg)

            # search new changesets directly pulled as obsolete
            duplicates = tr.changes.get('revduplicates', ())
            obsadded = unfi.revs('(%d: + %ld) and obsolete()',
                                 origrepolen, duplicates)
            cl = repo.changelog
            extinctadded = [r for r in obsadded if r not in cl]
            if extinctadded:
                # They are not just obsolete, but obsolete and invisible;
                # we call them "extinct" internally but the term has not
                # been exposed to users.
                msg = '(%d other changesets obsolete on arrival)\n'
                repo.ui.status(msg % len(extinctadded))

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            pull/unbundle.
            """
            origrepolen = tr.changes.get('origrepolen', len(repo))
            phasetracking = tr.changes.get('phases', {})
            if not phasetracking:
                return
            published = [
                rev for rev, (old, new) in phasetracking.iteritems()
                if new == phases.public and rev < origrepolen
            ]
            if not published:
                return
            repo.ui.status(_('%d local changesets published\n')
                           % len(published))

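# Illustrative sketch (assumption, not from this file): callers are expected
# to register the summary callbacks when a transaction is opened, along the
# lines of:
#
#     tr = repo.transaction('pull')
#     registersummarycallback(repo, tr, txnname='pull')
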
def getinstabilitymessage(delta, instability):
    """function to return the message to show warning about new instabilities

    exists as a separate function so that extensions can wrap it to show more
    information, like how to fix instabilities"""
    if delta > 0:
        return _('%i new %s changesets\n') % (delta, instability)

def nodesummaries(repo, nodes, maxnumnodes=4):
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return ' '.join(short(h) for h in nodes)
    first = ' '.join(short(h) for h in nodes[:maxnumnodes])
    return _("%s and %d others") % (first, len(nodes) - maxnumnodes)

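# For illustration (hypothetical input): with the default maxnumnodes=4 and
# six nodes, nodesummaries() returns four short hashes followed by
# "and 2 others"; with four or fewer nodes, or in verbose mode, all short
# hashes are listed.
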
def enforcesinglehead(repo, tr, desc):
    """check that no named branch has multiple heads"""
    if desc in ('strip', 'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered('visible')
    # possible improvement: we could restrict the check to the affected
    # branches
    for name, heads in visible.branchmap().iteritems():
        if len(heads) > 1:
            msg = _('rejecting multiple heads on branch "%s"')
            msg %= name
            hint = _('%d heads: %s')
            hint %= (len(heads), nodesummaries(repo, heads))
            raise error.Abort(msg, hint=hint)

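# Illustrative sketch (assumption, not from this file): the check above could
# be wired into a transaction as a validator, where 'tr' is an open
# transaction and 'desc' its name.
#
#     tr.addvalidator('zz-singlehead',
#                     lambda tr2: enforcesinglehead(repo, tr2, desc))
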
def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.
    """
    return sink

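# Illustrative sketch (hypothetical extension code): an extension could
# post-process the convert sink by wrapping this hook point; 'mysinkproxy'
# is a placeholder class.
#
#     def _wrapsink(orig, sink):
#         return mysinkproxy(orig(sink))
#
#     extensions.wrapfunction(scmutil, 'wrapconvertsink', _wrapsink)
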
def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    if not repo.filtername or not repo.ui.configbool('experimental',
                                                     'directaccess'):
        return repo

    if repo.filtername not in ('visible', 'visible-hidden'):
        return repo

    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError:  # will be reported by scmutil.revrange()
            continue

        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)

    if not revs:
        return repo

    if hiddentype == 'warn':
        unfi = repo.unfiltered()
        revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
        repo.ui.warn(_("warning: accessing hidden changesets for write "
                       "operation: %s\n") % revstr)

    # we have to use a new filtername to separate branch/tags caches until we
    # can disable these caches when revisions are dynamically pinned.
    return repo.filtered('visible-hidden', revs)

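# Illustrative example (hypothetical hash prefix): a write command aimed at a
# hidden changeset could call something like
#
#     repo = unhidehashlikerevs(repo, ['1f0dee6'], 'warn')
#
# and get back a 'visible-hidden' filtered repo with that changeset pinned.
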
def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and return a set of revision numbers of hidden
    changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
    for s in symbols:
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    if n not in cl:
                        revs.add(n)
                        continue
        except ValueError:
            pass

        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs

def bookmarkrevs(repo, mark):
    """
    Select revisions reachable by a given bookmark
    """
    return repo.revs("ancestors(bookmark(%s)) - "
                     "ancestors(head() and not bookmark(%s)) - "
                     "ancestors(bookmark() and not bookmark(%s))",
                     mark, mark, mark)
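
# Illustrative example (hypothetical bookmark name): bookmarkrevs(repo,
# 'feature') selects the changesets "owned" by the bookmark 'feature':
# ancestors of the bookmark, minus anything also reachable from other heads
# or from other bookmarks.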