dispatch: add inline comment about possible IOError subtypes...
Yuya Nishihara
r41464:b5169e79 default
@@ -1,1815 +1,1815 @@
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import hashlib
13 13 import os
14 14 import re
15 15 import subprocess
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 bin,
21 21 hex,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 wdirid,
26 26 wdirrev,
27 27 )
28 28
29 29 from . import (
30 30 encoding,
31 31 error,
32 32 match as matchmod,
33 33 obsolete,
34 34 obsutil,
35 35 pathutil,
36 36 phases,
37 37 policy,
38 38 pycompat,
39 39 revsetlang,
40 40 similar,
41 41 smartset,
42 42 url,
43 43 util,
44 44 vfs,
45 45 )
46 46
47 47 from .utils import (
48 48 procutil,
49 49 stringutil,
50 50 )
51 51
52 52 if pycompat.iswindows:
53 53 from . import scmwindows as scmplatform
54 54 else:
55 55 from . import scmposix as scmplatform
56 56
57 57 parsers = policy.importmod(r'parsers')
58 58
59 59 termsize = scmplatform.termsize
60 60
61 61 class status(tuple):
62 62 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
63 63 and 'ignored' properties are only relevant to the working copy.
64 64 '''
65 65
66 66 __slots__ = ()
67 67
68 68 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
69 69 clean):
70 70 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
71 71 ignored, clean))
72 72
73 73 @property
74 74 def modified(self):
75 75 '''files that have been modified'''
76 76 return self[0]
77 77
78 78 @property
79 79 def added(self):
80 80 '''files that have been added'''
81 81 return self[1]
82 82
83 83 @property
84 84 def removed(self):
85 85 '''files that have been removed'''
86 86 return self[2]
87 87
88 88 @property
89 89 def deleted(self):
90 90 '''files that are in the dirstate, but have been deleted from the
91 91 working copy (aka "missing")
92 92 '''
93 93 return self[3]
94 94
95 95 @property
96 96 def unknown(self):
97 97 '''files not in the dirstate that are not ignored'''
98 98 return self[4]
99 99
100 100 @property
101 101 def ignored(self):
102 102 '''files not in the dirstate that are ignored (by _dirignore())'''
103 103 return self[5]
104 104
105 105 @property
106 106 def clean(self):
107 107 '''files that have not been modified'''
108 108 return self[6]
109 109
110 110 def __repr__(self, *args, **kwargs):
111 111 return ((r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
112 112 r'unknown=%s, ignored=%s, clean=%s>') %
113 113 tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self))
114 114
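A minimal usage sketch, assuming a Mercurial checkout is importable as
`mercurial` (property access and plain tuple indexing stay in sync):

    from mercurial import scmutil

    st = scmutil.status(modified=[b'a.txt'], added=[], removed=[],
                        deleted=[], unknown=[b'b.txt'], ignored=[],
                        clean=[])
    assert st.modified == [b'a.txt']  # property view
    assert st[4] == [b'b.txt']        # raw tuple view of 'unknown'
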
115 115 def itersubrepos(ctx1, ctx2):
116 116 """find subrepos in ctx1 or ctx2"""
117 117 # Create a (subpath, ctx) mapping where we prefer subpaths from
118 118 # ctx1. The subpaths from ctx2 are important when the .hgsub file
119 119 # has been modified (in ctx2) but not yet committed (in ctx1).
120 120 subpaths = dict.fromkeys(ctx2.substate, ctx2)
121 121 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
122 122
123 123 missing = set()
124 124
125 125 for subpath in ctx2.substate:
126 126 if subpath not in ctx1.substate:
127 127 del subpaths[subpath]
128 128 missing.add(subpath)
129 129
130 130 for subpath, ctx in sorted(subpaths.iteritems()):
131 131 yield subpath, ctx.sub(subpath)
132 132
133 133 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
134 134 # status and diff will have an accurate result when it does
135 135 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
136 136 # against itself.
137 137 for subpath in missing:
138 138 yield subpath, ctx2.nullsub(subpath, ctx1)
139 139
140 140 def nochangesfound(ui, repo, excluded=None):
141 141 '''Report no changes for push/pull, excluded is None or a list of
142 142 nodes excluded from the push/pull.
143 143 '''
144 144 secretlist = []
145 145 if excluded:
146 146 for n in excluded:
147 147 ctx = repo[n]
148 148 if ctx.phase() >= phases.secret and not ctx.extinct():
149 149 secretlist.append(n)
150 150
151 151 if secretlist:
152 152 ui.status(_("no changes found (ignored %d secret changesets)\n")
153 153 % len(secretlist))
154 154 else:
155 155 ui.status(_("no changes found\n"))
156 156
157 157 def callcatch(ui, func):
158 158 """call func() with global exception handling
159 159
160 160 return func() if no exception happens. otherwise do some error handling
161 161 and return an exit code accordingly. does not handle all exceptions.
162 162 """
163 163 try:
164 164 try:
165 165 return func()
166 166 except: # re-raises
167 167 ui.traceback()
168 168 raise
169 169 # Global exception handling, alphabetically
170 170 # Mercurial-specific first, followed by built-in and library exceptions
171 171 except error.LockHeld as inst:
172 172 if inst.errno == errno.ETIMEDOUT:
173 173 reason = _('timed out waiting for lock held by %r') % (
174 174 pycompat.bytestr(inst.locker))
175 175 else:
176 176 reason = _('lock held by %r') % inst.locker
177 177 ui.error(_("abort: %s: %s\n") % (
178 178 inst.desc or stringutil.forcebytestr(inst.filename), reason))
179 179 if not inst.locker:
180 180 ui.error(_("(lock might be very busy)\n"))
181 181 except error.LockUnavailable as inst:
182 182 ui.error(_("abort: could not lock %s: %s\n") %
183 183 (inst.desc or stringutil.forcebytestr(inst.filename),
184 184 encoding.strtolocal(inst.strerror)))
185 185 except error.OutOfBandError as inst:
186 186 if inst.args:
187 187 msg = _("abort: remote error:\n")
188 188 else:
189 189 msg = _("abort: remote error\n")
190 190 ui.error(msg)
191 191 if inst.args:
192 192 ui.error(''.join(inst.args))
193 193 if inst.hint:
194 194 ui.error('(%s)\n' % inst.hint)
195 195 except error.RepoError as inst:
196 196 ui.error(_("abort: %s!\n") % inst)
197 197 if inst.hint:
198 198 ui.error(_("(%s)\n") % inst.hint)
199 199 except error.ResponseError as inst:
200 200 ui.error(_("abort: %s") % inst.args[0])
201 201 msg = inst.args[1]
202 202 if isinstance(msg, type(u'')):
203 203 msg = pycompat.sysbytes(msg)
204 204 if not isinstance(msg, bytes):
205 205 ui.error(" %r\n" % (msg,))
206 206 elif not msg:
207 207 ui.error(_(" empty string\n"))
208 208 else:
209 209 ui.error("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
210 210 except error.CensoredNodeError as inst:
211 211 ui.error(_("abort: file censored %s!\n") % inst)
212 212 except error.StorageError as inst:
213 213 ui.error(_("abort: %s!\n") % inst)
214 214 if inst.hint:
215 215 ui.error(_("(%s)\n") % inst.hint)
216 216 except error.InterventionRequired as inst:
217 217 ui.error("%s\n" % inst)
218 218 if inst.hint:
219 219 ui.error(_("(%s)\n") % inst.hint)
220 220 return 1
221 221 except error.WdirUnsupported:
222 222 ui.error(_("abort: working directory revision cannot be specified\n"))
223 223 except error.Abort as inst:
224 224 ui.error(_("abort: %s\n") % inst)
225 225 if inst.hint:
226 226 ui.error(_("(%s)\n") % inst.hint)
227 227 except ImportError as inst:
228 228 ui.error(_("abort: %s!\n") % stringutil.forcebytestr(inst))
229 229 m = stringutil.forcebytestr(inst).split()[-1]
230 230 if m in "mpatch bdiff".split():
231 231 ui.error(_("(did you forget to compile extensions?)\n"))
232 232 elif m in "zlib".split():
233 233 ui.error(_("(is your Python install correct?)\n"))
234 234 except IOError as inst:
235 if util.safehasattr(inst, "code"):
235 if util.safehasattr(inst, "code"): # HTTPError
236 236 ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst))
237 elif util.safehasattr(inst, "reason"):
237 elif util.safehasattr(inst, "reason"): # URLError or SSLError
238 238 try: # usually it is in the form (errno, strerror)
239 239 reason = inst.reason.args[1]
240 240 except (AttributeError, IndexError):
241 241 # it might be anything, for example a string
242 242 reason = inst.reason
243 243 if isinstance(reason, pycompat.unicode):
244 244 # SSLError of Python 2.7.9 contains a unicode
245 245 reason = encoding.unitolocal(reason)
246 246 ui.error(_("abort: error: %s\n") % reason)
247 247 elif (util.safehasattr(inst, "args")
248 248 and inst.args and inst.args[0] == errno.EPIPE):
249 249 pass
250 elif getattr(inst, "strerror", None):
250 elif getattr(inst, "strerror", None): # common IOError
251 251 if getattr(inst, "filename", None):
252 252 ui.error(_("abort: %s: %s\n") % (
253 253 encoding.strtolocal(inst.strerror),
254 254 stringutil.forcebytestr(inst.filename)))
255 255 else:
256 256 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
257 else:
257 else: # suspicious IOError
258 258 raise
259 259 except OSError as inst:
260 260 if getattr(inst, "filename", None) is not None:
261 261 ui.error(_("abort: %s: '%s'\n") % (
262 262 encoding.strtolocal(inst.strerror),
263 263 stringutil.forcebytestr(inst.filename)))
264 264 else:
265 265 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
266 266 except MemoryError:
267 267 ui.error(_("abort: out of memory\n"))
268 268 except SystemExit as inst:
269 269 # Commands shouldn't sys.exit directly, but give a return code.
270 270 # Just in case catch this and pass exit code to caller.
271 271 return inst.code
272 272
273 273 return -1
274 274
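A hedged sketch of how a dispatcher might use callcatch; the `run`
callable here is hypothetical, and the ui object comes from mercurial.ui:

    from mercurial import scmutil, ui as uimod

    def run():
        raise MemoryError()  # any exception handled above

    u = uimod.ui.load()
    ret = scmutil.callcatch(u, run)  # prints "abort: out of memory"
    assert ret == -1
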
275 275 def checknewlabel(repo, lbl, kind):
276 276 # Do not use the "kind" parameter in ui output.
277 277 # It makes strings difficult to translate.
278 278 if lbl in ['tip', '.', 'null']:
279 279 raise error.Abort(_("the name '%s' is reserved") % lbl)
280 280 for c in (':', '\0', '\n', '\r'):
281 281 if c in lbl:
282 282 raise error.Abort(
283 283 _("%r cannot be used in a name") % pycompat.bytestr(c))
284 284 try:
285 285 int(lbl)
286 286 raise error.Abort(_("cannot use an integer as a name"))
287 287 except ValueError:
288 288 pass
289 289 if lbl.strip() != lbl:
290 290 raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
291 291
292 292 def checkfilename(f):
293 293 '''Check that the filename f is an acceptable filename for a tracked file'''
294 294 if '\r' in f or '\n' in f:
295 295 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
296 296 % pycompat.bytestr(f))
297 297
298 298 def checkportable(ui, f):
299 299 '''Check if filename f is portable and warn or abort depending on config'''
300 300 checkfilename(f)
301 301 abort, warn = checkportabilityalert(ui)
302 302 if abort or warn:
303 303 msg = util.checkwinfilename(f)
304 304 if msg:
305 305 msg = "%s: %s" % (msg, procutil.shellquote(f))
306 306 if abort:
307 307 raise error.Abort(msg)
308 308 ui.warn(_("warning: %s\n") % msg)
309 309
310 310 def checkportabilityalert(ui):
311 311 '''check if the user's config requests nothing, a warning, or abort for
312 312 non-portable filenames'''
313 313 val = ui.config('ui', 'portablefilenames')
314 314 lval = val.lower()
315 315 bval = stringutil.parsebool(val)
316 316 abort = pycompat.iswindows or lval == 'abort'
317 317 warn = bval or lval == 'warn'
318 318 if bval is None and not (warn or abort or lval == 'ignore'):
319 319 raise error.ConfigError(
320 320 _("ui.portablefilenames value is invalid ('%s')") % val)
321 321 return abort, warn
322 322
323 323 class casecollisionauditor(object):
324 324 def __init__(self, ui, abort, dirstate):
325 325 self._ui = ui
326 326 self._abort = abort
327 327 allfiles = '\0'.join(dirstate._map)
328 328 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
329 329 self._dirstate = dirstate
330 330 # The purpose of _newfiles is so that we don't complain about
331 331 # case collisions if someone were to call this object with the
332 332 # same filename twice.
333 333 self._newfiles = set()
334 334
335 335 def __call__(self, f):
336 336 if f in self._newfiles:
337 337 return
338 338 fl = encoding.lower(f)
339 339 if fl in self._loweredfiles and f not in self._dirstate:
340 340 msg = _('possible case-folding collision for %s') % f
341 341 if self._abort:
342 342 raise error.Abort(msg)
343 343 self._ui.warn(_("warning: %s\n") % msg)
344 344 self._loweredfiles.add(fl)
345 345 self._newfiles.add(f)
346 346
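Usage sketch (assumes an existing `repo` object, e.g. from
mercurial.hg.repository, and that neither name is tracked yet):

    audit = casecollisionauditor(repo.ui, abort=False,
                                 dirstate=repo.dirstate)
    audit(b'README')
    audit(b'readme')  # warns: possible case-folding collision for readme
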
347 347 def filteredhash(repo, maxrev):
348 348 """build hash of filtered revisions in the current repoview.
349 349
350 350 Multiple caches perform up-to-date validation by checking that the
351 351 tiprev and tipnode stored in the cache file match the current repository.
352 352 However, this is not sufficient for validating repoviews because the set
353 353 of revisions in the view may change without the repository tiprev and
354 354 tipnode changing.
355 355
356 356 This function hashes all the revs filtered from the view and returns
357 357 that SHA-1 digest.
358 358 """
359 359 cl = repo.changelog
360 360 if not cl.filteredrevs:
361 361 return None
362 362 key = None
363 363 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
364 364 if revs:
365 365 s = hashlib.sha1()
366 366 for rev in revs:
367 367 s.update('%d;' % rev)
368 368 key = s.digest()
369 369 return key
370 370
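The key derivation itself needs no repo object; a standalone sketch of
the same scheme (the helper name is illustrative):

    import hashlib

    def filteredhashkey(filteredrevs, maxrev):
        # same scheme as above, minus the repo plumbing
        revs = sorted(r for r in filteredrevs if r <= maxrev)
        if not revs:
            return None
        s = hashlib.sha1()
        for rev in revs:
            s.update(b'%d;' % rev)
        return s.digest()

    assert filteredhashkey({5, 9, 12}, 10) == filteredhashkey([9, 5], 10)
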
371 371 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
372 372 '''yield every hg repository under path, always recursively.
373 373 The recurse flag will only control recursion into repo working dirs'''
374 374 def errhandler(err):
375 375 if err.filename == path:
376 376 raise err
377 377 samestat = getattr(os.path, 'samestat', None)
378 378 if followsym and samestat is not None:
379 379 def adddir(dirlst, dirname):
380 380 dirstat = os.stat(dirname)
381 381 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
382 382 if not match:
383 383 dirlst.append(dirstat)
384 384 return not match
385 385 else:
386 386 followsym = False
387 387
388 388 if (seen_dirs is None) and followsym:
389 389 seen_dirs = []
390 390 adddir(seen_dirs, path)
391 391 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
392 392 dirs.sort()
393 393 if '.hg' in dirs:
394 394 yield root # found a repository
395 395 qroot = os.path.join(root, '.hg', 'patches')
396 396 if os.path.isdir(os.path.join(qroot, '.hg')):
397 397 yield qroot # we have a patch queue repo here
398 398 if recurse:
399 399 # avoid recursing inside the .hg directory
400 400 dirs.remove('.hg')
401 401 else:
402 402 dirs[:] = [] # don't descend further
403 403 elif followsym:
404 404 newdirs = []
405 405 for d in dirs:
406 406 fname = os.path.join(root, d)
407 407 if adddir(seen_dirs, fname):
408 408 if os.path.islink(fname):
409 409 for hgname in walkrepos(fname, True, seen_dirs):
410 410 yield hgname
411 411 else:
412 412 newdirs.append(d)
413 413 dirs[:] = newdirs
414 414
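Usage sketch, listing every repository under the current directory
(symlinks followed, recursion into working dirs enabled):

    import os
    from mercurial import scmutil

    for repopath in scmutil.walkrepos(os.curdir, followsym=True,
                                      recurse=True):
        print(repopath)
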
415 415 def binnode(ctx):
416 416 """Return binary node id for a given basectx"""
417 417 node = ctx.node()
418 418 if node is None:
419 419 return wdirid
420 420 return node
421 421
422 422 def intrev(ctx):
423 423 """Return integer for a given basectx that can be used in comparison or
424 424 arithmetic operation"""
425 425 rev = ctx.rev()
426 426 if rev is None:
427 427 return wdirrev
428 428 return rev
429 429
430 430 def formatchangeid(ctx):
431 431 """Format changectx as '{rev}:{node|formatnode}', which is the default
432 432 template provided by logcmdutil.changesettemplater"""
433 433 repo = ctx.repo()
434 434 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
435 435
436 436 def formatrevnode(ui, rev, node):
437 437 """Format given revision and node depending on the current verbosity"""
438 438 if ui.debugflag:
439 439 hexfunc = hex
440 440 else:
441 441 hexfunc = short
442 442 return '%d:%s' % (rev, hexfunc(node))
443 443
444 444 def resolvehexnodeidprefix(repo, prefix):
445 445 if (prefix.startswith('x') and
446 446 repo.ui.configbool('experimental', 'revisions.prefixhexnode')):
447 447 prefix = prefix[1:]
448 448 try:
449 449 # Uses unfiltered repo because it's faster when prefix is ambiguous.
450 450 # This matches the shortesthexnodeidprefix() function below.
451 451 node = repo.unfiltered().changelog._partialmatch(prefix)
452 452 except error.AmbiguousPrefixLookupError:
453 453 revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
454 454 if revset:
455 455 # Clear config to avoid infinite recursion
456 456 configoverrides = {('experimental',
457 457 'revisions.disambiguatewithin'): None}
458 458 with repo.ui.configoverride(configoverrides):
459 459 revs = repo.anyrevs([revset], user=True)
460 460 matches = []
461 461 for rev in revs:
462 462 node = repo.changelog.node(rev)
463 463 if hex(node).startswith(prefix):
464 464 matches.append(node)
465 465 if len(matches) == 1:
466 466 return matches[0]
467 467 raise
468 468 if node is None:
469 469 return
470 470 repo.changelog.rev(node) # make sure node isn't filtered
471 471 return node
472 472
473 473 def mayberevnum(repo, prefix):
474 474 """Checks if the given prefix may be mistaken for a revision number"""
475 475 try:
476 476 i = int(prefix)
477 477 # if we are a pure int, then starting with zero will not be
478 478 # confused as a rev; or, obviously, if the int is larger
479 479 # than the value of the tip rev. We still need to disambiguate if
480 480 # prefix == '0', since that *is* a valid revnum.
481 481 if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
482 482 return False
483 483 return True
484 484 except ValueError:
485 485 return False
486 486
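The rule can be restated without a repo by passing the revision count
explicitly; a sketch using native strings for brevity (the helper name
is illustrative):

    def mayberevnum_sketch(prefix, repolen):
        try:
            i = int(prefix)
        except ValueError:
            return False
        # leading zeros and out-of-range ints are never revnums
        return not ((prefix != '0' and prefix[:1] == '0') or i >= repolen)

    assert mayberevnum_sketch('0', 2000)           # '0' is rev 0
    assert not mayberevnum_sketch('007', 2000)     # leading zero
    assert not mayberevnum_sketch('999999', 2000)  # beyond tip
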
487 487 def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
488 488 """Find the shortest unambiguous prefix that matches hexnode.
489 489
490 490 If "cache" is not None, it must be a dictionary that can be used for
491 491 caching between calls to this method.
492 492 """
493 493 # _partialmatch() of filtered changelog could take O(len(repo)) time,
494 494 # which would be unacceptably slow, so we look for hash collisions in
495 495 # unfiltered space, which means some hashes may be slightly longer.
496 496
497 497 minlength = max(minlength, 1)
498 498
499 499 def disambiguate(prefix):
500 500 """Disambiguate against revnums."""
501 501 if repo.ui.configbool('experimental', 'revisions.prefixhexnode'):
502 502 if mayberevnum(repo, prefix):
503 503 return 'x' + prefix
504 504 else:
505 505 return prefix
506 506
507 507 hexnode = hex(node)
508 508 for length in range(len(prefix), len(hexnode) + 1):
509 509 prefix = hexnode[:length]
510 510 if not mayberevnum(repo, prefix):
511 511 return prefix
512 512
513 513 cl = repo.unfiltered().changelog
514 514 revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
515 515 if revset:
516 516 revs = None
517 517 if cache is not None:
518 518 revs = cache.get('disambiguationrevset')
519 519 if revs is None:
520 520 revs = repo.anyrevs([revset], user=True)
521 521 if cache is not None:
522 522 cache['disambiguationrevset'] = revs
523 523 if cl.rev(node) in revs:
524 524 hexnode = hex(node)
525 525 nodetree = None
526 526 if cache is not None:
527 527 nodetree = cache.get('disambiguationnodetree')
528 528 if not nodetree:
529 529 try:
530 530 nodetree = parsers.nodetree(cl.index, len(revs))
531 531 except AttributeError:
532 532 # no native nodetree
533 533 pass
534 534 else:
535 535 for r in revs:
536 536 nodetree.insert(r)
537 537 if cache is not None:
538 538 cache['disambiguationnodetree'] = nodetree
539 539 if nodetree is not None:
540 540 length = max(nodetree.shortest(node), minlength)
541 541 prefix = hexnode[:length]
542 542 return disambiguate(prefix)
543 543 for length in range(minlength, len(hexnode) + 1):
544 544 matches = []
545 545 prefix = hexnode[:length]
546 546 for rev in revs:
547 547 otherhexnode = repo[rev].hex()
548 548 if prefix == otherhexnode[:length]:
549 549 matches.append(otherhexnode)
550 550 if len(matches) == 1:
551 551 return disambiguate(prefix)
552 552
553 553 try:
554 554 return disambiguate(cl.shortest(node, minlength))
555 555 except error.LookupError:
556 556 raise error.RepoLookupError()
557 557
558 558 def isrevsymbol(repo, symbol):
559 559 """Checks if a symbol exists in the repo.
560 560
561 561 See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
562 562 symbol is an ambiguous nodeid prefix.
563 563 """
564 564 try:
565 565 revsymbol(repo, symbol)
566 566 return True
567 567 except error.RepoLookupError:
568 568 return False
569 569
570 570 def revsymbol(repo, symbol):
571 571 """Returns a context given a single revision symbol (as string).
572 572
573 573 This is similar to revsingle(), but accepts only a single revision symbol,
574 574 i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
575 575 not "max(public())".
576 576 """
577 577 if not isinstance(symbol, bytes):
578 578 msg = ("symbol (%s of type %s) was not a string, did you mean "
579 579 "repo[symbol]?" % (symbol, type(symbol)))
580 580 raise error.ProgrammingError(msg)
581 581 try:
582 582 if symbol in ('.', 'tip', 'null'):
583 583 return repo[symbol]
584 584
585 585 try:
586 586 r = int(symbol)
587 587 if '%d' % r != symbol:
588 588 raise ValueError
589 589 l = len(repo.changelog)
590 590 if r < 0:
591 591 r += l
592 592 if r < 0 or r >= l and r != wdirrev:
593 593 raise ValueError
594 594 return repo[r]
595 595 except error.FilteredIndexError:
596 596 raise
597 597 except (ValueError, OverflowError, IndexError):
598 598 pass
599 599
600 600 if len(symbol) == 40:
601 601 try:
602 602 node = bin(symbol)
603 603 rev = repo.changelog.rev(node)
604 604 return repo[rev]
605 605 except error.FilteredLookupError:
606 606 raise
607 607 except (TypeError, LookupError):
608 608 pass
609 609
610 610 # look up bookmarks through the name interface
611 611 try:
612 612 node = repo.names.singlenode(repo, symbol)
613 613 rev = repo.changelog.rev(node)
614 614 return repo[rev]
615 615 except KeyError:
616 616 pass
617 617
618 618 node = resolvehexnodeidprefix(repo, symbol)
619 619 if node is not None:
620 620 rev = repo.changelog.rev(node)
621 621 return repo[rev]
622 622
623 623 raise error.RepoLookupError(_("unknown revision '%s'") % symbol)
624 624
625 625 except error.WdirUnsupported:
626 626 return repo[None]
627 627 except (error.FilteredIndexError, error.FilteredLookupError,
628 628 error.FilteredRepoLookupError):
629 629 raise _filterederror(repo, symbol)
630 630
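A hedged usage sketch (assumes a repository in the current directory):

    from mercurial import hg, scmutil, ui as uimod

    repo = hg.repository(uimod.ui.load(), b'.')
    if scmutil.isrevsymbol(repo, b'tip'):
        print(scmutil.revsymbol(repo, b'tip').hex())
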
631 631 def _filterederror(repo, changeid):
632 632 """build an exception to be raised about a filtered changeid
633 633
634 634 This is extracted in a function to help extensions (eg: evolve) to
635 635 experiment with various message variants."""
636 636 if repo.filtername.startswith('visible'):
637 637
638 638 # Check if the changeset is obsolete
639 639 unfilteredrepo = repo.unfiltered()
640 640 ctx = revsymbol(unfilteredrepo, changeid)
641 641
642 642 # If the changeset is obsolete, enrich the message with the reason
643 643 # that made this changeset not visible
644 644 if ctx.obsolete():
645 645 msg = obsutil._getfilteredreason(repo, changeid, ctx)
646 646 else:
647 647 msg = _("hidden revision '%s'") % changeid
648 648
649 649 hint = _('use --hidden to access hidden revisions')
650 650
651 651 return error.FilteredRepoLookupError(msg, hint=hint)
652 652 msg = _("filtered revision '%s' (not in '%s' subset)")
653 653 msg %= (changeid, repo.filtername)
654 654 return error.FilteredRepoLookupError(msg)
655 655
656 656 def revsingle(repo, revspec, default='.', localalias=None):
657 657 if not revspec and revspec != 0:
658 658 return repo[default]
659 659
660 660 l = revrange(repo, [revspec], localalias=localalias)
661 661 if not l:
662 662 raise error.Abort(_('empty revision set'))
663 663 return repo[l.last()]
664 664
665 665 def _pairspec(revspec):
666 666 tree = revsetlang.parse(revspec)
667 667 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
668 668
669 669 def revpair(repo, revs):
670 670 if not revs:
671 671 return repo['.'], repo[None]
672 672
673 673 l = revrange(repo, revs)
674 674
675 675 if not l:
676 676 raise error.Abort(_('empty revision range'))
677 677
678 678 first = l.first()
679 679 second = l.last()
680 680
681 681 if (first == second and len(revs) >= 2
682 682 and not all(revrange(repo, [r]) for r in revs)):
683 683 raise error.Abort(_('empty revision on one side of range'))
684 684
685 685 # if top-level is range expression, the result must always be a pair
686 686 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
687 687 return repo[first], repo[None]
688 688
689 689 return repo[first], repo[second]
690 690
691 691 def revrange(repo, specs, localalias=None):
692 692 """Execute 1 to many revsets and return the union.
693 693
694 694 This is the preferred mechanism for executing revsets using user-specified
695 695 config options, such as revset aliases.
696 696
697 697 The revsets specified by ``specs`` will be executed via a chained ``OR``
698 698 expression. If ``specs`` is empty, an empty result is returned.
699 699
700 700 ``specs`` can contain integers, in which case they are assumed to be
701 701 revision numbers.
702 702
703 703 It is assumed the revsets are already formatted. If you have arguments
704 704 that need to be expanded in the revset, call ``revsetlang.formatspec()``
705 705 and pass the result as an element of ``specs``.
706 706
707 707 Specifying a single revset is allowed.
708 708
709 709 Returns a ``revset.abstractsmartset`` which is a list-like interface over
710 710 integer revisions.
711 711 """
712 712 allspecs = []
713 713 for spec in specs:
714 714 if isinstance(spec, int):
715 715 spec = revsetlang.formatspec('%d', spec)
716 716 allspecs.append(spec)
717 717 return repo.anyrevs(allspecs, user=True, localalias=localalias)
718 718
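Usage sketch: pre-format user-supplied values with
revsetlang.formatspec(), then hand the results to revrange()
(assumes an existing `repo`):

    from mercurial import revsetlang, scmutil

    spec = revsetlang.formatspec(b'branch(%s)', b'default')
    revs = scmutil.revrange(repo, [spec, b'.'])
    print(list(revs))
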
719 719 def meaningfulparents(repo, ctx):
720 720 """Return list of meaningful (or all if debug) parentrevs for rev.
721 721
722 722 For merges (two non-nullrev revisions) both parents are meaningful.
723 723 Otherwise the first parent revision is considered meaningful if it
724 724 is not the preceding revision.
725 725 """
726 726 parents = ctx.parents()
727 727 if len(parents) > 1:
728 728 return parents
729 729 if repo.ui.debugflag:
730 730 return [parents[0], repo[nullrev]]
731 731 if parents[0].rev() >= intrev(ctx) - 1:
732 732 return []
733 733 return parents
734 734
735 735 def expandpats(pats):
736 736 '''Expand bare globs when running on windows.
737 737 On posix we assume it has already been done by sh.'''
738 738 if not util.expandglobs:
739 739 return list(pats)
740 740 ret = []
741 741 for kindpat in pats:
742 742 kind, pat = matchmod._patsplit(kindpat, None)
743 743 if kind is None:
744 744 try:
745 745 globbed = glob.glob(pat)
746 746 except re.error:
747 747 globbed = [pat]
748 748 if globbed:
749 749 ret.extend(globbed)
750 750 continue
751 751 ret.append(kindpat)
752 752 return ret
753 753
754 754 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
755 755 badfn=None):
756 756 '''Return a matcher and the patterns that were used.
757 757 The matcher will warn about bad matches, unless an alternate badfn callback
758 758 is provided.'''
759 759 if pats == ("",):
760 760 pats = []
761 761 if opts is None:
762 762 opts = {}
763 763 if not globbed and default == 'relpath':
764 764 pats = expandpats(pats or [])
765 765
766 766 def bad(f, msg):
767 767 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
768 768
769 769 if badfn is None:
770 770 badfn = bad
771 771
772 772 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
773 773 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
774 774
775 775 if m.always():
776 776 pats = []
777 777 return m, pats
778 778
779 779 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
780 780 badfn=None):
781 781 '''Return a matcher that will warn about bad matches.'''
782 782 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
783 783
784 784 def matchall(repo):
785 785 '''Return a matcher that will efficiently match everything.'''
786 786 return matchmod.always(repo.root, repo.getcwd())
787 787
788 788 def matchfiles(repo, files, badfn=None):
789 789 '''Return a matcher that will efficiently match exactly these files.'''
790 790 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
791 791
792 792 def parsefollowlinespattern(repo, rev, pat, msg):
793 793 """Return a file name from `pat` pattern suitable for usage in followlines
794 794 logic.
795 795 """
796 796 if not matchmod.patkind(pat):
797 797 return pathutil.canonpath(repo.root, repo.getcwd(), pat)
798 798 else:
799 799 ctx = repo[rev]
800 800 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
801 801 files = [f for f in ctx if m(f)]
802 802 if len(files) != 1:
803 803 raise error.ParseError(msg)
804 804 return files[0]
805 805
806 806 def getorigvfs(ui, repo):
807 807 """return a vfs suitable to save 'orig' file
808 808
809 809 return None if no special directory is configured"""
810 810 origbackuppath = ui.config('ui', 'origbackuppath')
811 811 if not origbackuppath:
812 812 return None
813 813 return vfs.vfs(repo.wvfs.join(origbackuppath))
814 814
815 815 def origpath(ui, repo, filepath):
816 816 '''customize where .orig files are created
817 817
818 818 Fetch user defined path from config file: [ui] origbackuppath = <path>
819 819 Fall back to default (filepath with .orig suffix) if not specified
820 820 '''
821 821 origvfs = getorigvfs(ui, repo)
822 822 if origvfs is None:
823 823 return filepath + ".orig"
824 824
825 825 # Convert filepath from an absolute path into a path inside the repo.
826 826 filepathfromroot = util.normpath(os.path.relpath(filepath,
827 827 start=repo.root))
828 828
829 829 origbackupdir = origvfs.dirname(filepathfromroot)
830 830 if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
831 831 ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))
832 832
833 833 # Remove any files that conflict with the backup file's path
834 834 for f in reversed(list(util.finddirs(filepathfromroot))):
835 835 if origvfs.isfileorlink(f):
836 836 ui.note(_('removing conflicting file: %s\n')
837 837 % origvfs.join(f))
838 838 origvfs.unlink(f)
839 839 break
840 840
841 841 origvfs.makedirs(origbackupdir)
842 842
843 843 if origvfs.isdir(filepathfromroot) and not origvfs.islink(filepathfromroot):
844 844 ui.note(_('removing conflicting directory: %s\n')
845 845 % origvfs.join(filepathfromroot))
846 846 origvfs.rmtree(filepathfromroot, forcibly=True)
847 847
848 848 return origvfs.join(filepathfromroot)
849 849
850 850 class _containsnode(object):
851 851 """proxy __contains__(node) to container.__contains__ which accepts revs"""
852 852
853 853 def __init__(self, repo, revcontainer):
854 854 self._torev = repo.changelog.rev
855 855 self._revcontains = revcontainer.__contains__
856 856
857 857 def __contains__(self, node):
858 858 return self._revcontains(self._torev(node))
859 859
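Usage sketch (assumes an existing `repo`): wrap a rev container so it
can be probed by node instead of by rev.

    draftrevs = repo.revs(b'draft()')
    bynode = _containsnode(repo, draftrevs)
    print(repo[b'tip'].node() in bynode)  # True iff tip is draft
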
860 860 def cleanupnodes(repo, replacements, operation, moves=None, metadata=None,
861 861 fixphase=False, targetphase=None, backup=True):
862 862 """do common cleanups when old nodes are replaced by new nodes
863 863
864 864 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
865 865 (we might also want to move working directory parent in the future)
866 866
867 867 By default, bookmark moves are calculated automatically from 'replacements',
868 868 but 'moves' can be used to override that. Also, 'moves' may include
869 869 additional bookmark moves that should not have associated obsmarkers.
870 870
871 871 replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
872 872 have replacements. operation is a string, like "rebase".
873 873
874 874 metadata is a dictionary containing metadata to be stored in obsmarkers if
875 875 obsolescence is enabled.
876 876 """
877 877 assert fixphase or targetphase is None
878 878 if not replacements and not moves:
879 879 return
880 880
881 881 # translate mapping's other forms
882 882 if not util.safehasattr(replacements, 'items'):
883 883 replacements = {(n,): () for n in replacements}
884 884 else:
885 885 # upgrading non-tuple "source" to tuple ones for BC
886 886 repls = {}
887 887 for key, value in replacements.items():
888 888 if not isinstance(key, tuple):
889 889 key = (key,)
890 890 repls[key] = value
891 891 replacements = repls
892 892
893 893 # Unfiltered repo is needed since nodes in replacements might be hidden.
894 894 unfi = repo.unfiltered()
895 895
896 896 # Calculate bookmark movements
897 897 if moves is None:
898 898 moves = {}
899 899 for oldnodes, newnodes in replacements.items():
900 900 for oldnode in oldnodes:
901 901 if oldnode in moves:
902 902 continue
903 903 if len(newnodes) > 1:
904 904 # usually a split, take the one with biggest rev number
905 905 newnode = next(unfi.set('max(%ln)', newnodes)).node()
906 906 elif len(newnodes) == 0:
907 907 # move bookmark backwards
908 908 allreplaced = []
909 909 for rep in replacements:
910 910 allreplaced.extend(rep)
911 911 roots = list(unfi.set('max((::%n) - %ln)', oldnode,
912 912 allreplaced))
913 913 if roots:
914 914 newnode = roots[0].node()
915 915 else:
916 916 newnode = nullid
917 917 else:
918 918 newnode = newnodes[0]
919 919 moves[oldnode] = newnode
920 920
921 921 allnewnodes = [n for ns in replacements.values() for n in ns]
922 922 toretract = {}
923 923 toadvance = {}
924 924 if fixphase:
925 925 precursors = {}
926 926 for oldnodes, newnodes in replacements.items():
927 927 for oldnode in oldnodes:
928 928 for newnode in newnodes:
929 929 precursors.setdefault(newnode, []).append(oldnode)
930 930
931 931 allnewnodes.sort(key=lambda n: unfi[n].rev())
932 932 newphases = {}
933 933 def phase(ctx):
934 934 return newphases.get(ctx.node(), ctx.phase())
935 935 for newnode in allnewnodes:
936 936 ctx = unfi[newnode]
937 937 parentphase = max(phase(p) for p in ctx.parents())
938 938 if targetphase is None:
939 939 oldphase = max(unfi[oldnode].phase()
940 940 for oldnode in precursors[newnode])
941 941 newphase = max(oldphase, parentphase)
942 942 else:
943 943 newphase = max(targetphase, parentphase)
944 944 newphases[newnode] = newphase
945 945 if newphase > ctx.phase():
946 946 toretract.setdefault(newphase, []).append(newnode)
947 947 elif newphase < ctx.phase():
948 948 toadvance.setdefault(newphase, []).append(newnode)
949 949
950 950 with repo.transaction('cleanup') as tr:
951 951 # Move bookmarks
952 952 bmarks = repo._bookmarks
953 953 bmarkchanges = []
954 954 for oldnode, newnode in moves.items():
955 955 oldbmarks = repo.nodebookmarks(oldnode)
956 956 if not oldbmarks:
957 957 continue
958 958 from . import bookmarks # avoid import cycle
959 959 repo.ui.debug('moving bookmarks %r from %s to %s\n' %
960 960 (pycompat.rapply(pycompat.maybebytestr, oldbmarks),
961 961 hex(oldnode), hex(newnode)))
962 962 # Delete divergent bookmarks being parents of related newnodes
963 963 deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
964 964 allnewnodes, newnode, oldnode)
965 965 deletenodes = _containsnode(repo, deleterevs)
966 966 for name in oldbmarks:
967 967 bmarkchanges.append((name, newnode))
968 968 for b in bookmarks.divergent2delete(repo, deletenodes, name):
969 969 bmarkchanges.append((b, None))
970 970
971 971 if bmarkchanges:
972 972 bmarks.applychanges(repo, tr, bmarkchanges)
973 973
974 974 for phase, nodes in toretract.items():
975 975 phases.retractboundary(repo, tr, phase, nodes)
976 976 for phase, nodes in toadvance.items():
977 977 phases.advanceboundary(repo, tr, phase, nodes)
978 978
979 979 # Obsolete or strip nodes
980 980 if obsolete.isenabled(repo, obsolete.createmarkersopt):
981 981 # If a node is already obsoleted, and we want to obsolete it
982 982 # without a successor, skip that obsolete request since it's
983 983 # unnecessary. That's the "if s or not isobs(n)" check below.
984 984 # Also sort the node in topology order, that might be useful for
985 985 # some obsstore logic.
986 986 # NOTE: the sorting might belong to createmarkers.
987 987 torev = unfi.changelog.rev
988 988 sortfunc = lambda ns: torev(ns[0][0])
989 989 rels = []
990 990 for ns, s in sorted(replacements.items(), key=sortfunc):
991 991 rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
992 992 rels.append(rel)
993 993 if rels:
994 994 obsolete.createmarkers(repo, rels, operation=operation,
995 995 metadata=metadata)
996 996 else:
997 997 from . import repair # avoid import cycle
998 998 tostrip = list(n for ns in replacements for n in ns)
999 999 if tostrip:
1000 1000 repair.delayedstrip(repo.ui, repo, tostrip, operation,
1001 1001 backup=backup)
1002 1002
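A minimal call sketch (assumes `repo` plus binary `oldnode`/`newnode`
from a rewrite): one old node replaced by one successor.

    cleanupnodes(repo, {oldnode: [newnode]}, b'amend')
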
1003 1003 def addremove(repo, matcher, prefix, opts=None):
1004 1004 if opts is None:
1005 1005 opts = {}
1006 1006 m = matcher
1007 1007 dry_run = opts.get('dry_run')
1008 1008 try:
1009 1009 similarity = float(opts.get('similarity') or 0)
1010 1010 except ValueError:
1011 1011 raise error.Abort(_('similarity must be a number'))
1012 1012 if similarity < 0 or similarity > 100:
1013 1013 raise error.Abort(_('similarity must be between 0 and 100'))
1014 1014 similarity /= 100.0
1015 1015
1016 1016 ret = 0
1017 1017 join = lambda f: os.path.join(prefix, f)
1018 1018
1019 1019 wctx = repo[None]
1020 1020 for subpath in sorted(wctx.substate):
1021 1021 submatch = matchmod.subdirmatcher(subpath, m)
1022 1022 if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
1023 1023 sub = wctx.sub(subpath)
1024 1024 try:
1025 1025 if sub.addremove(submatch, prefix, opts):
1026 1026 ret = 1
1027 1027 except error.LookupError:
1028 1028 repo.ui.status(_("skipping missing subrepository: %s\n")
1029 1029 % join(subpath))
1030 1030
1031 1031 rejected = []
1032 1032 def badfn(f, msg):
1033 1033 if f in m.files():
1034 1034 m.bad(f, msg)
1035 1035 rejected.append(f)
1036 1036
1037 1037 badmatch = matchmod.badmatch(m, badfn)
1038 1038 added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
1039 1039 badmatch)
1040 1040
1041 1041 unknownset = set(unknown + forgotten)
1042 1042 toprint = unknownset.copy()
1043 1043 toprint.update(deleted)
1044 1044 for abs in sorted(toprint):
1045 1045 if repo.ui.verbose or not m.exact(abs):
1046 1046 if abs in unknownset:
1047 1047 status = _('adding %s\n') % m.uipath(abs)
1048 1048 label = 'ui.addremove.added'
1049 1049 else:
1050 1050 status = _('removing %s\n') % m.uipath(abs)
1051 1051 label = 'ui.addremove.removed'
1052 1052 repo.ui.status(status, label=label)
1053 1053
1054 1054 renames = _findrenames(repo, m, added + unknown, removed + deleted,
1055 1055 similarity)
1056 1056
1057 1057 if not dry_run:
1058 1058 _markchanges(repo, unknown + forgotten, deleted, renames)
1059 1059
1060 1060 for f in rejected:
1061 1061 if f in m.files():
1062 1062 return 1
1063 1063 return ret
1064 1064
1065 1065 def marktouched(repo, files, similarity=0.0):
1066 1066 '''Assert that files have somehow been operated upon. files are relative to
1067 1067 the repo root.'''
1068 1068 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
1069 1069 rejected = []
1070 1070
1071 1071 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
1072 1072
1073 1073 if repo.ui.verbose:
1074 1074 unknownset = set(unknown + forgotten)
1075 1075 toprint = unknownset.copy()
1076 1076 toprint.update(deleted)
1077 1077 for abs in sorted(toprint):
1078 1078 if abs in unknownset:
1079 1079 status = _('adding %s\n') % abs
1080 1080 else:
1081 1081 status = _('removing %s\n') % abs
1082 1082 repo.ui.status(status)
1083 1083
1084 1084 renames = _findrenames(repo, m, added + unknown, removed + deleted,
1085 1085 similarity)
1086 1086
1087 1087 _markchanges(repo, unknown + forgotten, deleted, renames)
1088 1088
1089 1089 for f in rejected:
1090 1090 if f in m.files():
1091 1091 return 1
1092 1092 return 0
1093 1093
1094 1094 def _interestingfiles(repo, matcher):
1095 1095 '''Walk dirstate with matcher, looking for files that addremove would care
1096 1096 about.
1097 1097
1098 1098 This is different from dirstate.status because it doesn't care about
1099 1099 whether files are modified or clean.'''
1100 1100 added, unknown, deleted, removed, forgotten = [], [], [], [], []
1101 1101 audit_path = pathutil.pathauditor(repo.root, cached=True)
1102 1102
1103 1103 ctx = repo[None]
1104 1104 dirstate = repo.dirstate
1105 1105 matcher = repo.narrowmatch(matcher, includeexact=True)
1106 1106 walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
1107 1107 unknown=True, ignored=False, full=False)
1108 1108 for abs, st in walkresults.iteritems():
1109 1109 dstate = dirstate[abs]
1110 1110 if dstate == '?' and audit_path.check(abs):
1111 1111 unknown.append(abs)
1112 1112 elif dstate != 'r' and not st:
1113 1113 deleted.append(abs)
1114 1114 elif dstate == 'r' and st:
1115 1115 forgotten.append(abs)
1116 1116 # for finding renames
1117 1117 elif dstate == 'r' and not st:
1118 1118 removed.append(abs)
1119 1119 elif dstate == 'a':
1120 1120 added.append(abs)
1121 1121
1122 1122 return added, unknown, deleted, removed, forgotten
1123 1123
1124 1124 def _findrenames(repo, matcher, added, removed, similarity):
1125 1125 '''Find renames from removed files to added ones.'''
1126 1126 renames = {}
1127 1127 if similarity > 0:
1128 1128 for old, new, score in similar.findrenames(repo, added, removed,
1129 1129 similarity):
1130 1130 if (repo.ui.verbose or not matcher.exact(old)
1131 1131 or not matcher.exact(new)):
1132 1132 repo.ui.status(_('recording removal of %s as rename to %s '
1133 1133 '(%d%% similar)\n') %
1134 1134 (matcher.rel(old), matcher.rel(new),
1135 1135 score * 100))
1136 1136 renames[new] = old
1137 1137 return renames
1138 1138
1139 1139 def _markchanges(repo, unknown, deleted, renames):
1140 1140 '''Marks the files in unknown as added, the files in deleted as removed,
1141 1141 and the files in renames as copied.'''
1142 1142 wctx = repo[None]
1143 1143 with repo.wlock():
1144 1144 wctx.forget(deleted)
1145 1145 wctx.add(unknown)
1146 1146 for new, old in renames.iteritems():
1147 1147 wctx.copy(old, new)
1148 1148
1149 1149 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1150 1150 """Update the dirstate to reflect the intent of copying src to dst. For
1151 1151 different reasons it might not end with dst being marked as copied from src.
1152 1152 """
1153 1153 origsrc = repo.dirstate.copied(src) or src
1154 1154 if dst == origsrc: # copying back a copy?
1155 1155 if repo.dirstate[dst] not in 'mn' and not dryrun:
1156 1156 repo.dirstate.normallookup(dst)
1157 1157 else:
1158 1158 if repo.dirstate[origsrc] == 'a' and origsrc == src:
1159 1159 if not ui.quiet:
1160 1160 ui.warn(_("%s has not been committed yet, so no copy "
1161 1161 "data will be stored for %s.\n")
1162 1162 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
1163 1163 if repo.dirstate[dst] in '?r' and not dryrun:
1164 1164 wctx.add([dst])
1165 1165 elif not dryrun:
1166 1166 wctx.copy(origsrc, dst)
1167 1167
1168 1168 def writerequires(opener, requirements):
1169 1169 with opener('requires', 'w', atomictemp=True) as fp:
1170 1170 for r in sorted(requirements):
1171 1171 fp.write("%s\n" % r)
1172 1172
1173 1173 class filecachesubentry(object):
1174 1174 def __init__(self, path, stat):
1175 1175 self.path = path
1176 1176 self.cachestat = None
1177 1177 self._cacheable = None
1178 1178
1179 1179 if stat:
1180 1180 self.cachestat = filecachesubentry.stat(self.path)
1181 1181
1182 1182 if self.cachestat:
1183 1183 self._cacheable = self.cachestat.cacheable()
1184 1184 else:
1185 1185 # None means we don't know yet
1186 1186 self._cacheable = None
1187 1187
1188 1188 def refresh(self):
1189 1189 if self.cacheable():
1190 1190 self.cachestat = filecachesubentry.stat(self.path)
1191 1191
1192 1192 def cacheable(self):
1193 1193 if self._cacheable is not None:
1194 1194 return self._cacheable
1195 1195
1196 1196 # we don't know yet, assume it is for now
1197 1197 return True
1198 1198
1199 1199 def changed(self):
1200 1200 # no point in going further if we can't cache it
1201 1201 if not self.cacheable():
1202 1202 return True
1203 1203
1204 1204 newstat = filecachesubentry.stat(self.path)
1205 1205
1206 1206 # we may not know if it's cacheable yet, check again now
1207 1207 if newstat and self._cacheable is None:
1208 1208 self._cacheable = newstat.cacheable()
1209 1209
1210 1210 # check again
1211 1211 if not self._cacheable:
1212 1212 return True
1213 1213
1214 1214 if self.cachestat != newstat:
1215 1215 self.cachestat = newstat
1216 1216 return True
1217 1217 else:
1218 1218 return False
1219 1219
1220 1220 @staticmethod
1221 1221 def stat(path):
1222 1222 try:
1223 1223 return util.cachestat(path)
1224 1224 except OSError as e:
1225 1225 if e.errno != errno.ENOENT:
1226 1226 raise
1227 1227
1228 1228 class filecacheentry(object):
1229 1229 def __init__(self, paths, stat=True):
1230 1230 self._entries = []
1231 1231 for path in paths:
1232 1232 self._entries.append(filecachesubentry(path, stat))
1233 1233
1234 1234 def changed(self):
1235 1235 '''true if any entry has changed'''
1236 1236 for entry in self._entries:
1237 1237 if entry.changed():
1238 1238 return True
1239 1239 return False
1240 1240
1241 1241 def refresh(self):
1242 1242 for entry in self._entries:
1243 1243 entry.refresh()
1244 1244
1245 1245 class filecache(object):
1246 1246 """A property like decorator that tracks files under .hg/ for updates.
1247 1247
1248 1248 On first access, the files defined as arguments are stat()ed and the
1249 1249 results cached. The decorated function is called. The results are stashed
1250 1250 away in a ``_filecache`` dict on the object whose method is decorated.
1251 1251
1252 1252 On subsequent access, the cached result is used as it is set to the
1253 1253 instance dictionary.
1254 1254
1255 1255 On external property set/delete operations, the caller must update the
1256 1256 corresponding _filecache entry appropriately. Use __class__.<attr>.set()
1257 1257 instead of directly setting <attr>.
1258 1258
1259 1259 When using the property API, the cached data is always used if available.
1260 1260 No stat() is performed to check if the file has changed.
1261 1261
1262 1262 Others can muck about with the state of the ``_filecache`` dict. e.g. they
1263 1263 can populate an entry before the property's getter is called. In this case,
1264 1264 entries in ``_filecache`` will be used during property operations,
1265 1265 if available. If the underlying file changes, it is up to external callers
1266 1266 to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
1267 1267 method result as well as possibly calling ``del obj._filecache[attr]`` to
1268 1268 remove the ``filecacheentry``.
1269 1269 """
1270 1270
1271 1271 def __init__(self, *paths):
1272 1272 self.paths = paths
1273 1273
1274 1274 def join(self, obj, fname):
1275 1275 """Used to compute the runtime path of a cached file.
1276 1276
1277 1277 Users should subclass filecache and provide their own version of this
1278 1278 function to call the appropriate join function on 'obj' (an instance
1279 1279 of the class that its member function was decorated).
1280 1280 """
1281 1281 raise NotImplementedError
1282 1282
1283 1283 def __call__(self, func):
1284 1284 self.func = func
1285 1285 self.sname = func.__name__
1286 1286 self.name = pycompat.sysbytes(self.sname)
1287 1287 return self
1288 1288
1289 1289 def __get__(self, obj, type=None):
1290 1290 # if accessed on the class, return the descriptor itself.
1291 1291 if obj is None:
1292 1292 return self
1293 1293
1294 1294 assert self.sname not in obj.__dict__
1295 1295
1296 1296 entry = obj._filecache.get(self.name)
1297 1297
1298 1298 if entry:
1299 1299 if entry.changed():
1300 1300 entry.obj = self.func(obj)
1301 1301 else:
1302 1302 paths = [self.join(obj, path) for path in self.paths]
1303 1303
1304 1304 # We stat -before- creating the object so our cache doesn't lie if
1305 1305 # a writer modified the file between the time we read and stat
1306 1306 entry = filecacheentry(paths, True)
1307 1307 entry.obj = self.func(obj)
1308 1308
1309 1309 obj._filecache[self.name] = entry
1310 1310
1311 1311 obj.__dict__[self.sname] = entry.obj
1312 1312 return entry.obj
1313 1313
1314 1314 # don't implement __set__(), which would make __dict__ lookup as slow as
1315 1315 # function call.
1316 1316
1317 1317 def set(self, obj, value):
1318 1318 if self.name not in obj._filecache:
1319 1319 # we add an entry for the missing value because X in __dict__
1320 1320 # implies X in _filecache
1321 1321 paths = [self.join(obj, path) for path in self.paths]
1322 1322 ce = filecacheentry(paths, False)
1323 1323 obj._filecache[self.name] = ce
1324 1324 else:
1325 1325 ce = obj._filecache[self.name]
1326 1326
1327 1327 ce.obj = value # update cached copy
1328 1328 obj.__dict__[self.sname] = value # update copy returned by obj.x
1329 1329
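A sketch of the expected subclassing pattern (the real repofilecache
lives in mercurial.localrepo; this variant is illustrative):

    class repofilecache(filecache):
        def join(self, obj, fname):
            return obj.vfs.join(fname)

    class fakerepo(object):
        def __init__(self, vfs):
            self.vfs = vfs
            self._filecache = {}

        @repofilecache(b'requires')
        def requires(self):
            return self.vfs.read(b'requires')
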
1330 1330 def extdatasource(repo, source):
1331 1331 """Gather a map of rev -> value dict from the specified source
1332 1332
1333 1333 A source spec is treated as a URL, with a special case shell: type
1334 1334 for parsing the output from a shell command.
1335 1335
1336 1336 The data is parsed as a series of newline-separated records where
1337 1337 each record is a revision specifier optionally followed by a space
1338 1338 and a freeform string value. If the revision is known locally, it
1339 1339 is converted to a rev, otherwise the record is skipped.
1340 1340
1341 1341 Note that both key and value are treated as UTF-8 and converted to
1342 1342 the local encoding. This allows uniformity between local and
1343 1343 remote data sources.
1344 1344 """
1345 1345
1346 1346 spec = repo.ui.config("extdata", source)
1347 1347 if not spec:
1348 1348 raise error.Abort(_("unknown extdata source '%s'") % source)
1349 1349
1350 1350 data = {}
1351 1351 src = proc = None
1352 1352 try:
1353 1353 if spec.startswith("shell:"):
1354 1354 # external commands should be run relative to the repo root
1355 1355 cmd = spec[6:]
1356 1356 proc = subprocess.Popen(procutil.tonativestr(cmd),
1357 1357 shell=True, bufsize=-1,
1358 1358 close_fds=procutil.closefds,
1359 1359 stdout=subprocess.PIPE,
1360 1360 cwd=procutil.tonativestr(repo.root))
1361 1361 src = proc.stdout
1362 1362 else:
1363 1363 # treat as a URL or file
1364 1364 src = url.open(repo.ui, spec)
1365 1365 for l in src:
1366 1366 if " " in l:
1367 1367 k, v = l.strip().split(" ", 1)
1368 1368 else:
1369 1369 k, v = l.strip(), ""
1370 1370
1371 1371 k = encoding.tolocal(k)
1372 1372 try:
1373 1373 data[revsingle(repo, k).rev()] = encoding.tolocal(v)
1374 1374 except (error.LookupError, error.RepoLookupError):
1375 1375 pass # we ignore data for nodes that don't exist locally
1376 1376 finally:
1377 1377 if proc:
1378 1378 proc.communicate()
1379 1379 if src:
1380 1380 src.close()
1381 1381 if proc and proc.returncode != 0:
1382 1382 raise error.Abort(_("extdata command '%s' failed: %s")
1383 1383 % (cmd, procutil.explainexit(proc.returncode)))
1384 1384
1385 1385 return data
1386 1386
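Configuration sketch: with an [extdata] entry such as

    [extdata]
    bugzilla = shell:cat .hg/bugmap

where .hg/bugmap holds "<revspec> <value>" lines, the call
extdatasource(repo, b'bugzilla') returns a {rev: value} mapping
(assumes an existing `repo`; the source name is illustrative).
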
1387 1387 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1388 1388 if lock is None:
1389 1389 raise error.LockInheritanceContractViolation(
1390 1390 'lock can only be inherited while held')
1391 1391 if environ is None:
1392 1392 environ = {}
1393 1393 with lock.inherit() as locker:
1394 1394 environ[envvar] = locker
1395 1395 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1396 1396
1397 1397 def wlocksub(repo, cmd, *args, **kwargs):
1398 1398 """run cmd as a subprocess that allows inheriting repo's wlock
1399 1399
1400 1400 This can only be called while the wlock is held. This takes all the
1401 1401 arguments that ui.system does, and returns the exit code of the
1402 1402 subprocess."""
1403 1403 return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
1404 1404 **kwargs)
1405 1405
1406 1406 class progress(object):
1407 1407 def __init__(self, ui, updatebar, topic, unit="", total=None):
1408 1408 self.ui = ui
1409 1409 self.pos = 0
1410 1410 self.topic = topic
1411 1411 self.unit = unit
1412 1412 self.total = total
1413 1413 self.debug = ui.configbool('progress', 'debug')
1414 1414 self._updatebar = updatebar
1415 1415
1416 1416 def __enter__(self):
1417 1417 return self
1418 1418
1419 1419 def __exit__(self, exc_type, exc_value, exc_tb):
1420 1420 self.complete()
1421 1421
1422 1422 def update(self, pos, item="", total=None):
1423 1423 assert pos is not None
1424 1424 if total:
1425 1425 self.total = total
1426 1426 self.pos = pos
1427 1427 self._updatebar(self.topic, self.pos, item, self.unit, self.total)
1428 1428 if self.debug:
1429 1429 self._printdebug(item)
1430 1430
1431 1431 def increment(self, step=1, item="", total=None):
1432 1432 self.update(self.pos + step, item, total)
1433 1433
1434 1434 def complete(self):
1435 1435 self.pos = None
1436 1436 self.unit = ""
1437 1437 self.total = None
1438 1438 self._updatebar(self.topic, self.pos, "", self.unit, self.total)
1439 1439
1440 1440 def _printdebug(self, item):
1441 1441 if self.unit:
1442 1442 unit = ' ' + self.unit
1443 1443 if item:
1444 1444 item = ' ' + item
1445 1445
1446 1446 if self.total:
1447 1447 pct = 100.0 * self.pos / self.total
1448 1448 self.ui.debug('%s:%s %d/%d%s (%4.2f%%)\n'
1449 1449 % (self.topic, item, self.pos, self.total, unit, pct))
1450 1450 else:
1451 1451 self.ui.debug('%s:%s %d%s\n' % (self.topic, item, self.pos, unit))
1452 1452
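Usage sketch: callers normally obtain this class through
ui.makeprogress(), which supplies the updatebar callback (assumes a
`ui` object):

    with ui.makeprogress(b'scanning', unit=b'files', total=100) as p:
        for i in range(100):
            p.increment(item=b'file%d' % i)
    # complete() runs automatically when the 'with' block exits
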
1453 1453 def gdinitconfig(ui):
1454 1454 """helper function to know if a repo should be created as general delta
1455 1455 """
1456 1456 # experimental config: format.generaldelta
1457 1457 return (ui.configbool('format', 'generaldelta')
1458 1458 or ui.configbool('format', 'usegeneraldelta'))
1459 1459
1460 1460 def gddeltaconfig(ui):
1461 1461 """helper function to know if incoming delta should be optimised
1462 1462 """
1463 1463 # experimental config: format.generaldelta
1464 1464 return ui.configbool('format', 'generaldelta')
1465 1465
1466 1466 class simplekeyvaluefile(object):
1467 1467 """A simple file with key=value lines
1468 1468
1469 1469 Keys must be alphanumerics and start with a letter, values must not
1470 1470 contain '\n' characters"""
1471 1471 firstlinekey = '__firstline'
1472 1472
1473 1473 def __init__(self, vfs, path, keys=None):
1474 1474 self.vfs = vfs
1475 1475 self.path = path
1476 1476
1477 1477 def read(self, firstlinenonkeyval=False):
1478 1478 """Read the contents of a simple key-value file
1479 1479
1480 1480 'firstlinenonkeyval' indicates whether the first line of file should
1481 1481 be treated as a key-value pair or returned fully under the
1482 1482 __firstline key."""
1483 1483 lines = self.vfs.readlines(self.path)
1484 1484 d = {}
1485 1485 if firstlinenonkeyval:
1486 1486 if not lines:
1487 1487 e = _("empty simplekeyvalue file")
1488 1488 raise error.CorruptedState(e)
1489 1489 # we don't want to include '\n' in the __firstline
1490 1490 d[self.firstlinekey] = lines[0][:-1]
1491 1491 del lines[0]
1492 1492
1493 1493 try:
1494 1494 # the 'if line.strip()' part prevents us from failing on empty
1495 1495 # lines which only contain '\n' and are therefore not skipped
1496 1496 # by 'if line'
1497 1497 updatedict = dict(line[:-1].split('=', 1) for line in lines
1498 1498 if line.strip())
1499 1499 if self.firstlinekey in updatedict:
1500 1500 e = _("%r can't be used as a key")
1501 1501 raise error.CorruptedState(e % self.firstlinekey)
1502 1502 d.update(updatedict)
1503 1503 except ValueError as e:
1504 1504 raise error.CorruptedState(str(e))
1505 1505 return d
1506 1506
1507 1507 def write(self, data, firstline=None):
1508 1508 """Write key=>value mapping to a file
1509 1509 data is a dict. Keys must be alphanumerical and start with a letter.
1510 1510 Values must not contain newline characters.
1511 1511
1512 1512 If 'firstline' is not None, it is written to file before
1513 1513 everything else, as it is, not in a key=value form"""
1514 1514 lines = []
1515 1515 if firstline is not None:
1516 1516 lines.append('%s\n' % firstline)
1517 1517
1518 1518 for k, v in data.items():
1519 1519 if k == self.firstlinekey:
1520 1520 e = "key name '%s' is reserved" % self.firstlinekey
1521 1521 raise error.ProgrammingError(e)
1522 1522 if not k[0:1].isalpha():
1523 1523 e = "keys must start with a letter in a key-value file"
1524 1524 raise error.ProgrammingError(e)
1525 1525 if not k.isalnum():
1526 1526 e = "invalid key name in a simple key-value file"
1527 1527 raise error.ProgrammingError(e)
1528 1528 if '\n' in v:
1529 1529 e = "invalid value in a simple key-value file"
1530 1530 raise error.ProgrammingError(e)
1531 1531 lines.append("%s=%s\n" % (k, v))
1532 1532 with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
1533 1533 fp.write(''.join(lines))
1534 1534
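# Illustrative usage sketch (not part of the original module); 'repo' and the
# 'mysettings' file name are hypothetical:
#
#   kv = simplekeyvaluefile(repo.vfs, 'mysettings')
#   kv.write({'owner': 'alice', 'state': 'active'}, firstline='version1')
#   data = kv.read(firstlinenonkeyval=True)
#   # data == {'__firstline': 'version1', 'owner': 'alice', 'state': 'active'}
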
_reportobsoletedsource = [
    'debugobsolete',
    'pull',
    'push',
    'serve',
    'unbundle',
]

_reportnewcssource = [
    'pull',
    'unbundle',
]

def prefetchfiles(repo, revs, match):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally, before the command
    uses them."""
    if match:
        # The command itself will complain about files that don't exist, so
        # don't duplicate the message.
        match = matchmod.badmatch(match, lambda fn, msg: None)
    else:
        match = matchall(repo)

    fileprefetchhooks(repo, revs, match)

# a list of (repo, revs, match) prefetch functions
fileprefetchhooks = util.hooks()

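# Illustrative sketch (not part of the original module): an extension could
# register a prefetch function via util.hooks.add(); 'myext' and '_prefetch'
# are hypothetical names.
#
#   def _prefetch(repo, revs, match):
#       # fetch the matched file contents for 'revs' ahead of time
#       ...
#   fileprefetchhooks.add('myext', _prefetch)
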
# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True

def registersummarycallback(repo, otr, txnname=''):
    """register a callback to issue a summary after the transaction is closed
    """
    def txmatch(sources):
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used, leading to trouble. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())
        def wrapped(tr):
            repo = reporef()
            if filtername:
                repo = repo.filtered(filtername)
            func(repo, tr)
        newcat = '%02i-txnreport' % len(categories)
        otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    if txmatch(_reportobsoletedsource):
        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            if obsoleted:
                repo.ui.status(_('obsoleted %i changesets\n')
                               % len(obsoleted))

    if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
        repo.ui.configbool('experimental', 'evolution.report-instabilities')):
        instabilitytypes = [
            ('orphan', 'orphan'),
            ('phase-divergent', 'phasedivergent'),
            ('content-divergent', 'contentdivergent'),
        ]

        def getinstabilitycounts(repo):
            filtered = repo.changelog.filteredrevs
            counts = {}
            for instability, revset in instabilitytypes:
                counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
                                          filtered)
            return counts

        oldinstabilitycounts = getinstabilitycounts(repo)
        @reportsummary
        def reportnewinstabilities(repo, tr):
            newinstabilitycounts = getinstabilitycounts(repo)
            for instability, revset in instabilitytypes:
                delta = (newinstabilitycounts[instability] -
                         oldinstabilitycounts[instability])
                msg = getinstabilitymessage(delta, instability)
                if msg:
                    repo.ui.warn(msg)

    if txmatch(_reportnewcssource):
        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get('origrepolen', len(repo))
            unfi = repo.unfiltered()
            if origrepolen >= len(unfi):
                return

            # Compute the bounds of the new visible revisions' range.
            revs = smartset.spanset(repo, start=origrepolen)
            if revs:
                minrev, maxrev = repo[revs.min()], repo[revs.max()]

                if minrev == maxrev:
                    revrange = minrev
                else:
                    revrange = '%s:%s' % (minrev, maxrev)
                draft = len(repo.revs('%ld and draft()', revs))
                secret = len(repo.revs('%ld and secret()', revs))
                if not (draft or secret):
                    msg = _('new changesets %s\n') % revrange
                elif draft and secret:
                    msg = _('new changesets %s (%d drafts, %d secrets)\n')
                    msg %= (revrange, draft, secret)
                elif draft:
                    msg = _('new changesets %s (%d drafts)\n')
                    msg %= (revrange, draft)
                elif secret:
                    msg = _('new changesets %s (%d secrets)\n')
                    msg %= (revrange, secret)
                else:
                    errormsg = 'entered unreachable condition'
                    raise error.ProgrammingError(errormsg)
                repo.ui.status(msg)

            # search new changesets directly pulled as obsolete
            duplicates = tr.changes.get('revduplicates', ())
            obsadded = unfi.revs('(%d: + %ld) and obsolete()',
                                 origrepolen, duplicates)
            cl = repo.changelog
            extinctadded = [r for r in obsadded if r not in cl]
            if extinctadded:
                # They are not just obsolete, but obsolete and invisible;
                # we call them "extinct" internally, but the term has not
                # been exposed to users.
                msg = '(%d other changesets obsolete on arrival)\n'
                repo.ui.status(msg % len(extinctadded))

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            pull/unbundle.
            """
            origrepolen = tr.changes.get('origrepolen', len(repo))
            phasetracking = tr.changes.get('phases', {})
            if not phasetracking:
                return
            published = [
                rev for rev, (old, new) in phasetracking.iteritems()
                if new == phases.public and rev < origrepolen
            ]
            if not published:
                return
            repo.ui.status(_('%d local changesets published\n')
                           % len(published))

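# Illustrative sketch (not part of the original module): a transaction-opening
# code path could wire up the summary callbacks roughly like this; 'repo' is
# a hypothetical local variable, and the reports fire when the transaction
# closes.
#
#   tr = repo.transaction('pull')
#   registersummarycallback(repo, tr, txnname='pull')
#   # ... apply incoming changes, then tr.close() triggers the reports
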
def getinstabilitymessage(delta, instability):
    """function to return the warning message to show about new instabilities

    exists as a separate function so that extensions can wrap it to show more
    information, like how to fix instabilities"""
    if delta > 0:
        return _('%i new %s changesets\n') % (delta, instability)

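# Illustrative sketch (not part of the original module): since the function
# exists to be wrapped, an extension could extend the message roughly like
# this; the wrapper name and appended hint are hypothetical.
#
#   from mercurial import extensions, scmutil
#   def _message(orig, delta, instability):
#       msg = orig(delta, instability)
#       if msg:
#           msg += "(see 'hg help evolution' for background)\n"
#       return msg
#   extensions.wrapfunction(scmutil, 'getinstabilitymessage', _message)
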
def nodesummaries(repo, nodes, maxnumnodes=4):
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return ' '.join(short(h) for h in nodes)
    first = ' '.join(short(h) for h in nodes[:maxnumnodes])
    return _("%s and %d others") % (first, len(nodes) - maxnumnodes)

def enforcesinglehead(repo, tr, desc):
    """check that no named branch has multiple heads"""
    if desc in ('strip', 'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered('visible')
    # possible improvement: we could restrict the check to the affected
    # branches
    for name, heads in visible.branchmap().iteritems():
        if len(heads) > 1:
            msg = _('rejecting multiple heads on branch "%s"')
            msg %= name
            hint = _('%d heads: %s')
            hint %= (len(heads), nodesummaries(repo, heads))
            raise error.Abort(msg, hint=hint)

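# Illustrative sketch (not part of the original module): a caller enforcing a
# single-head policy would invoke the check just before closing a transaction;
# the 'commit' description is a hypothetical example.
#
#   enforcesinglehead(repo, tr, 'commit')
#   tr.close()
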
def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.
    """
    return sink

def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    if not repo.filtername or not repo.ui.configbool('experimental',
                                                     'directaccess'):
        return repo

    if repo.filtername not in ('visible', 'visible-hidden'):
        return repo

    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError:  # will be reported by scmutil.revrange()
            continue

        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)

    if not revs:
        return repo

    if hiddentype == 'warn':
        unfi = repo.unfiltered()
        revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
        repo.ui.warn(_("warning: accessing hidden changesets for write "
                       "operation: %s\n") % revstr)

    # we have to use a new filtername to separate branch/tags caches until we
    # can disable these caches when revisions are dynamically pinned.
    return repo.filtered('visible-hidden', revs)

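# Illustrative sketch (not part of the original module): a command taking
# revision arguments might unhide hash-like specs before resolving them; the
# hash value shown is made up.
#
#   specs = ['ffa21189b740']
#   repo = unhidehashlikerevs(repo, specs, 'warn')
#   revs = revrange(repo, specs)
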
def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and return a set of revision numbers of the
    hidden changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
    for s in symbols:
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    if n not in cl:
                        revs.add(n)
                    continue
        except ValueError:
            pass

        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs

def bookmarkrevs(repo, mark):
    """
    Select revisions reachable by a given bookmark
    """
    return repo.revs("ancestors(bookmark(%s)) - "
                     "ancestors(head() and not bookmark(%s)) - "
                     "ancestors(bookmark() and not bookmark(%s))",
                     mark, mark, mark)
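
# Illustrative sketch (not part of the original module): listing the revisions
# selected for a given bookmark; the bookmark name is hypothetical.
#
#   for rev in bookmarkrevs(repo, 'feature-x'):
#       repo.ui.status('%s\n' % repo[rev])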