progress: split up _print() method in bar-updating and debug-printing...
Martin von Zweigbergk
r41180:3025fd3c default
@@ -1,1839 +1,1839 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import hashlib
13 13 import os
14 14 import re
15 15 import socket
16 16 import subprocess
17 17 import weakref
18 18
19 19 from .i18n import _
20 20 from .node import (
21 21 bin,
22 22 hex,
23 23 nullid,
24 24 nullrev,
25 25 short,
26 26 wdirid,
27 27 wdirrev,
28 28 )
29 29
30 30 from . import (
31 31 encoding,
32 32 error,
33 33 match as matchmod,
34 34 obsolete,
35 35 obsutil,
36 36 pathutil,
37 37 phases,
38 38 policy,
39 39 pycompat,
40 40 revsetlang,
41 41 similar,
42 42 smartset,
43 43 url,
44 44 util,
45 45 vfs,
46 46 )
47 47
48 48 from .utils import (
49 49 procutil,
50 50 stringutil,
51 51 )
52 52
53 53 if pycompat.iswindows:
54 54 from . import scmwindows as scmplatform
55 55 else:
56 56 from . import scmposix as scmplatform
57 57
58 58 parsers = policy.importmod(r'parsers')
59 59
60 60 termsize = scmplatform.termsize
61 61
62 62 class status(tuple):
63 63 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
64 64 and 'ignored' properties are only relevant to the working copy.
65 65 '''
66 66
67 67 __slots__ = ()
68 68
69 69 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
70 70 clean):
71 71 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
72 72 ignored, clean))
73 73
74 74 @property
75 75 def modified(self):
76 76 '''files that have been modified'''
77 77 return self[0]
78 78
79 79 @property
80 80 def added(self):
81 81 '''files that have been added'''
82 82 return self[1]
83 83
84 84 @property
85 85 def removed(self):
86 86 '''files that have been removed'''
87 87 return self[2]
88 88
89 89 @property
90 90 def deleted(self):
91 91 '''files that are in the dirstate, but have been deleted from the
92 92 working copy (aka "missing")
93 93 '''
94 94 return self[3]
95 95
96 96 @property
97 97 def unknown(self):
98 98 '''files not in the dirstate that are not ignored'''
99 99 return self[4]
100 100
101 101 @property
102 102 def ignored(self):
103 103 '''files not in the dirstate that are ignored (by _dirignore())'''
104 104 return self[5]
105 105
106 106 @property
107 107 def clean(self):
108 108 '''files that have not been modified'''
109 109 return self[6]
110 110
111 111 def __repr__(self, *args, **kwargs):
112 112 return ((r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
113 113 r'unknown=%s, ignored=%s, clean=%s>') %
114 114 tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self))
115 115
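# A minimal sketch of consuming the status tuple above; the file lists
# here are hypothetical. Positional access and the named properties agree.
st = status(modified=[b'a.txt'], added=[b'b.txt'], removed=[], deleted=[],
            unknown=[], ignored=[], clean=[b'c.txt'])
assert st.modified == [b'a.txt']
assert st[1] == st.added
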
116 116 def itersubrepos(ctx1, ctx2):
117 117 """find subrepos in ctx1 or ctx2"""
118 118 # Create a (subpath, ctx) mapping where we prefer subpaths from
119 119 # ctx1. The subpaths from ctx2 are important when the .hgsub file
120 120 # has been modified (in ctx2) but not yet committed (in ctx1).
121 121 subpaths = dict.fromkeys(ctx2.substate, ctx2)
122 122 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
123 123
124 124 missing = set()
125 125
126 126 for subpath in ctx2.substate:
127 127 if subpath not in ctx1.substate:
128 128 del subpaths[subpath]
129 129 missing.add(subpath)
130 130
131 131 for subpath, ctx in sorted(subpaths.iteritems()):
132 132 yield subpath, ctx.sub(subpath)
133 133
134 134 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
135 135 # status and diff will have an accurate result when it does
136 136 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
137 137 # against itself.
138 138 for subpath in missing:
139 139 yield subpath, ctx2.nullsub(subpath, ctx1)
140 140
141 141 def nochangesfound(ui, repo, excluded=None):
142 142 '''Report no changes for push/pull, excluded is None or a list of
143 143 nodes excluded from the push/pull.
144 144 '''
145 145 secretlist = []
146 146 if excluded:
147 147 for n in excluded:
148 148 ctx = repo[n]
149 149 if ctx.phase() >= phases.secret and not ctx.extinct():
150 150 secretlist.append(n)
151 151
152 152 if secretlist:
153 153 ui.status(_("no changes found (ignored %d secret changesets)\n")
154 154 % len(secretlist))
155 155 else:
156 156 ui.status(_("no changes found\n"))
157 157
158 158 def callcatch(ui, func):
159 159 """call func() with global exception handling
160 160
161 161 return func() if no exception happens. otherwise do some error handling
162 162 and return an exit code accordingly. does not handle all exceptions.
163 163 """
164 164 try:
165 165 try:
166 166 return func()
167 167 except: # re-raises
168 168 ui.traceback()
169 169 raise
170 170 # Global exception handling, alphabetically
171 171 # Mercurial-specific first, followed by built-in and library exceptions
172 172 except error.LockHeld as inst:
173 173 if inst.errno == errno.ETIMEDOUT:
174 174 reason = _('timed out waiting for lock held by %r') % (
175 175 pycompat.bytestr(inst.locker))
176 176 else:
177 177 reason = _('lock held by %r') % inst.locker
178 178 ui.error(_("abort: %s: %s\n") % (
179 179 inst.desc or stringutil.forcebytestr(inst.filename), reason))
180 180 if not inst.locker:
181 181 ui.error(_("(lock might be very busy)\n"))
182 182 except error.LockUnavailable as inst:
183 183 ui.error(_("abort: could not lock %s: %s\n") %
184 184 (inst.desc or stringutil.forcebytestr(inst.filename),
185 185 encoding.strtolocal(inst.strerror)))
186 186 except error.OutOfBandError as inst:
187 187 if inst.args:
188 188 msg = _("abort: remote error:\n")
189 189 else:
190 190 msg = _("abort: remote error\n")
191 191 ui.error(msg)
192 192 if inst.args:
193 193 ui.error(''.join(inst.args))
194 194 if inst.hint:
195 195 ui.error('(%s)\n' % inst.hint)
196 196 except error.RepoError as inst:
197 197 ui.error(_("abort: %s!\n") % inst)
198 198 if inst.hint:
199 199 ui.error(_("(%s)\n") % inst.hint)
200 200 except error.ResponseError as inst:
201 201 ui.error(_("abort: %s") % inst.args[0])
202 202 msg = inst.args[1]
203 203 if isinstance(msg, type(u'')):
204 204 msg = pycompat.sysbytes(msg)
205 205 if not isinstance(msg, bytes):
206 206 ui.error(" %r\n" % (msg,))
207 207 elif not msg:
208 208 ui.error(_(" empty string\n"))
209 209 else:
210 210 ui.error("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
211 211 except error.CensoredNodeError as inst:
212 212 ui.error(_("abort: file censored %s!\n") % inst)
213 213 except error.StorageError as inst:
214 214 ui.error(_("abort: %s!\n") % inst)
215 215 if inst.hint:
216 216 ui.error(_("(%s)\n") % inst.hint)
217 217 except error.InterventionRequired as inst:
218 218 ui.error("%s\n" % inst)
219 219 if inst.hint:
220 220 ui.error(_("(%s)\n") % inst.hint)
221 221 return 1
222 222 except error.WdirUnsupported:
223 223 ui.error(_("abort: working directory revision cannot be specified\n"))
224 224 except error.Abort as inst:
225 225 ui.error(_("abort: %s\n") % inst)
226 226 if inst.hint:
227 227 ui.error(_("(%s)\n") % inst.hint)
228 228 except ImportError as inst:
229 229 ui.error(_("abort: %s!\n") % stringutil.forcebytestr(inst))
230 230 m = stringutil.forcebytestr(inst).split()[-1]
231 231 if m in "mpatch bdiff".split():
232 232 ui.error(_("(did you forget to compile extensions?)\n"))
233 233 elif m in "zlib".split():
234 234 ui.error(_("(is your Python install correct?)\n"))
235 235 except IOError as inst:
236 236 if util.safehasattr(inst, "code"):
237 237 ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst))
238 238 elif util.safehasattr(inst, "reason"):
239 239 try: # usually it is in the form (errno, strerror)
240 240 reason = inst.reason.args[1]
241 241 except (AttributeError, IndexError):
242 242 # it might be anything, for example a string
243 243 reason = inst.reason
244 244 if isinstance(reason, pycompat.unicode):
245 245 # SSLError of Python 2.7.9 contains a unicode
246 246 reason = encoding.unitolocal(reason)
247 247 ui.error(_("abort: error: %s\n") % reason)
248 248 elif (util.safehasattr(inst, "args")
249 249 and inst.args and inst.args[0] == errno.EPIPE):
250 250 pass
251 251 elif getattr(inst, "strerror", None):
252 252 if getattr(inst, "filename", None):
253 253 ui.error(_("abort: %s: %s\n") % (
254 254 encoding.strtolocal(inst.strerror),
255 255 stringutil.forcebytestr(inst.filename)))
256 256 else:
257 257 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
258 258 else:
259 259 raise
260 260 except OSError as inst:
261 261 if getattr(inst, "filename", None) is not None:
262 262 ui.error(_("abort: %s: '%s'\n") % (
263 263 encoding.strtolocal(inst.strerror),
264 264 stringutil.forcebytestr(inst.filename)))
265 265 else:
266 266 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
267 267 except MemoryError:
268 268 ui.error(_("abort: out of memory\n"))
269 269 except SystemExit as inst:
270 270 # Commands shouldn't sys.exit directly, but give a return code.
271 271 # Just in case catch this and pass exit code to caller.
272 272 return inst.code
273 273 except socket.error as inst:
274 274 ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))
275 275
276 276 return -1
277 277
278 278 def checknewlabel(repo, lbl, kind):
279 279 # Do not use the "kind" parameter in ui output.
280 280 # It makes strings difficult to translate.
281 281 if lbl in ['tip', '.', 'null']:
282 282 raise error.Abort(_("the name '%s' is reserved") % lbl)
283 283 for c in (':', '\0', '\n', '\r'):
284 284 if c in lbl:
285 285 raise error.Abort(
286 286 _("%r cannot be used in a name") % pycompat.bytestr(c))
287 287 try:
288 288 int(lbl)
289 289 raise error.Abort(_("cannot use an integer as a name"))
290 290 except ValueError:
291 291 pass
292 292 if lbl.strip() != lbl:
293 293 raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
294 294
295 295 def checkfilename(f):
296 296 '''Check that the filename f is an acceptable filename for a tracked file'''
297 297 if '\r' in f or '\n' in f:
298 298 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
299 299 % pycompat.bytestr(f))
300 300
301 301 def checkportable(ui, f):
302 302 '''Check if filename f is portable and warn or abort depending on config'''
303 303 checkfilename(f)
304 304 abort, warn = checkportabilityalert(ui)
305 305 if abort or warn:
306 306 msg = util.checkwinfilename(f)
307 307 if msg:
308 308 msg = "%s: %s" % (msg, procutil.shellquote(f))
309 309 if abort:
310 310 raise error.Abort(msg)
311 311 ui.warn(_("warning: %s\n") % msg)
312 312
313 313 def checkportabilityalert(ui):
314 314 '''check if the user's config requests nothing, a warning, or abort for
315 315 non-portable filenames'''
316 316 val = ui.config('ui', 'portablefilenames')
317 317 lval = val.lower()
318 318 bval = stringutil.parsebool(val)
319 319 abort = pycompat.iswindows or lval == 'abort'
320 320 warn = bval or lval == 'warn'
321 321 if bval is None and not (warn or abort or lval == 'ignore'):
322 322 raise error.ConfigError(
323 323 _("ui.portablefilenames value is invalid ('%s')") % val)
324 324 return abort, warn
325 325
326 326 class casecollisionauditor(object):
327 327 def __init__(self, ui, abort, dirstate):
328 328 self._ui = ui
329 329 self._abort = abort
330 330 allfiles = '\0'.join(dirstate._map)
331 331 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
332 332 self._dirstate = dirstate
333 333 # The purpose of _newfiles is so that we don't complain about
334 334 # case collisions if someone were to call this object with the
335 335 # same filename twice.
336 336 self._newfiles = set()
337 337
338 338 def __call__(self, f):
339 339 if f in self._newfiles:
340 340 return
341 341 fl = encoding.lower(f)
342 342 if fl in self._loweredfiles and f not in self._dirstate:
343 343 msg = _('possible case-folding collision for %s') % f
344 344 if self._abort:
345 345 raise error.Abort(msg)
346 346 self._ui.warn(_("warning: %s\n") % msg)
347 347 self._loweredfiles.add(fl)
348 348 self._newfiles.add(f)
349 349
350 350 def filteredhash(repo, maxrev):
351 351 """build hash of filtered revisions in the current repoview.
352 352
353 353 Multiple caches perform up-to-date validation by checking that the
354 354 tiprev and tipnode stored in the cache file match the current repository.
355 355 However, this is not sufficient for validating repoviews because the set
356 356 of revisions in the view may change without the repository tiprev and
357 357 tipnode changing.
358 358
359 359 This function hashes all the revs filtered from the view and returns
360 360 that SHA-1 digest.
361 361 """
362 362 cl = repo.changelog
363 363 if not cl.filteredrevs:
364 364 return None
365 365 key = None
366 366 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
367 367 if revs:
368 368 s = hashlib.sha1()
369 369 for rev in revs:
370 370 s.update('%d;' % rev)
371 371 key = s.digest()
372 372 return key
373 373
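# A hedged sketch of the validation scheme described above; the cache
# object and its fields are hypothetical. A cached value is only reusable
# if tiprev, tipnode and the filtered-revision hash all still match.
def cacheisvalid(repo, cache):
    cl = repo.changelog
    return (cache.tiprev == len(cl) - 1
            and cache.tipnode == cl.node(cache.tiprev)
            and cache.filteredhash == filteredhash(repo, cache.tiprev))
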
374 374 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
375 375 '''yield every hg repository under path, always recursively.
376 376 The recurse flag will only control recursion into repo working dirs'''
377 377 def errhandler(err):
378 378 if err.filename == path:
379 379 raise err
380 380 samestat = getattr(os.path, 'samestat', None)
381 381 if followsym and samestat is not None:
382 382 def adddir(dirlst, dirname):
383 383 dirstat = os.stat(dirname)
384 384 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
385 385 if not match:
386 386 dirlst.append(dirstat)
387 387 return not match
388 388 else:
389 389 followsym = False
390 390
391 391 if (seen_dirs is None) and followsym:
392 392 seen_dirs = []
393 393 adddir(seen_dirs, path)
394 394 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
395 395 dirs.sort()
396 396 if '.hg' in dirs:
397 397 yield root # found a repository
398 398 qroot = os.path.join(root, '.hg', 'patches')
399 399 if os.path.isdir(os.path.join(qroot, '.hg')):
400 400 yield qroot # we have a patch queue repo here
401 401 if recurse:
402 402 # avoid recursing inside the .hg directory
403 403 dirs.remove('.hg')
404 404 else:
405 405 dirs[:] = [] # don't descend further
406 406 elif followsym:
407 407 newdirs = []
408 408 for d in dirs:
409 409 fname = os.path.join(root, d)
410 410 if adddir(seen_dirs, fname):
411 411 if os.path.islink(fname):
412 412 for hgname in walkrepos(fname, True, seen_dirs):
413 413 yield hgname
414 414 else:
415 415 newdirs.append(d)
416 416 dirs[:] = newdirs
417 417
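# Usage sketch, assuming a hypothetical directory of repositories: yields
# each repo root (and any patch queue repo) found under the path.
for repopath in walkrepos(b'/srv/repos', followsym=True):
    print(repopath)
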
418 418 def binnode(ctx):
419 419 """Return binary node id for a given basectx"""
420 420 node = ctx.node()
421 421 if node is None:
422 422 return wdirid
423 423 return node
424 424
425 425 def intrev(ctx):
426 426 """Return integer for a given basectx that can be used in comparison or
427 427 arithmetic operation"""
428 428 rev = ctx.rev()
429 429 if rev is None:
430 430 return wdirrev
431 431 return rev
432 432
433 433 def formatchangeid(ctx):
434 434 """Format changectx as '{rev}:{node|formatnode}', which is the default
435 435 template provided by logcmdutil.changesettemplater"""
436 436 repo = ctx.repo()
437 437 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
438 438
439 439 def formatrevnode(ui, rev, node):
440 440 """Format given revision and node depending on the current verbosity"""
441 441 if ui.debugflag:
442 442 hexfunc = hex
443 443 else:
444 444 hexfunc = short
445 445 return '%d:%s' % (rev, hexfunc(node))
446 446
447 447 def resolvehexnodeidprefix(repo, prefix):
448 448 if (prefix.startswith('x') and
449 449 repo.ui.configbool('experimental', 'revisions.prefixhexnode')):
450 450 prefix = prefix[1:]
451 451 try:
452 452 # Uses unfiltered repo because it's faster when prefix is ambiguous.
453 453 # This matches the shortesthexnodeidprefix() function below.
454 454 node = repo.unfiltered().changelog._partialmatch(prefix)
455 455 except error.AmbiguousPrefixLookupError:
456 456 revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
457 457 if revset:
458 458 # Clear config to avoid infinite recursion
459 459 configoverrides = {('experimental',
460 460 'revisions.disambiguatewithin'): None}
461 461 with repo.ui.configoverride(configoverrides):
462 462 revs = repo.anyrevs([revset], user=True)
463 463 matches = []
464 464 for rev in revs:
465 465 node = repo.changelog.node(rev)
466 466 if hex(node).startswith(prefix):
467 467 matches.append(node)
468 468 if len(matches) == 1:
469 469 return matches[0]
470 470 raise
471 471 if node is None:
472 472 return
473 473 repo.changelog.rev(node) # make sure node isn't filtered
474 474 return node
475 475
476 476 def mayberevnum(repo, prefix):
477 477 """Checks if the given prefix may be mistaken for a revision number"""
478 478 try:
479 479 i = int(prefix)
480 480 # if we are a pure int, then starting with zero will not be
481 481 # confused as a rev; or, obviously, if the int is larger
482 482 # than the value of the tip rev. We still need to disambiguate if
483 483 # prefix == '0', since that *is* a valid revnum.
484 484 if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
485 485 return False
486 486 return True
487 487 except ValueError:
488 488 return False
489 489
490 490 def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
491 491 """Find the shortest unambiguous prefix that matches hexnode.
492 492
493 493 If "cache" is not None, it must be a dictionary that can be used for
494 494 caching between calls to this method.
495 495 """
496 496 # _partialmatch() of filtered changelog could take O(len(repo)) time,
497 497 # which would be unacceptably slow. so we look for hash collision in
498 498 # unfiltered space, which means some hashes may be slightly longer.
499 499
500 500 minlength = max(minlength, 1)
501 501
502 502 def disambiguate(prefix):
503 503 """Disambiguate against revnums."""
504 504 if repo.ui.configbool('experimental', 'revisions.prefixhexnode'):
505 505 if mayberevnum(repo, prefix):
506 506 return 'x' + prefix
507 507 else:
508 508 return prefix
509 509
510 510 hexnode = hex(node)
511 511 for length in range(len(prefix), len(hexnode) + 1):
512 512 prefix = hexnode[:length]
513 513 if not mayberevnum(repo, prefix):
514 514 return prefix
515 515
516 516 cl = repo.unfiltered().changelog
517 517 revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
518 518 if revset:
519 519 revs = None
520 520 if cache is not None:
521 521 revs = cache.get('disambiguationrevset')
522 522 if revs is None:
523 523 revs = repo.anyrevs([revset], user=True)
524 524 if cache is not None:
525 525 cache['disambiguationrevset'] = revs
526 526 if cl.rev(node) in revs:
527 527 hexnode = hex(node)
528 528 nodetree = None
529 529 if cache is not None:
530 530 nodetree = cache.get('disambiguationnodetree')
531 531 if not nodetree:
532 532 try:
533 533 nodetree = parsers.nodetree(cl.index, len(revs))
534 534 except AttributeError:
535 535 # no native nodetree
536 536 pass
537 537 else:
538 538 for r in revs:
539 539 nodetree.insert(r)
540 540 if cache is not None:
541 541 cache['disambiguationnodetree'] = nodetree
542 542 if nodetree is not None:
543 543 length = max(nodetree.shortest(node), minlength)
544 544 prefix = hexnode[:length]
545 545 return disambiguate(prefix)
546 546 for length in range(minlength, len(hexnode) + 1):
547 547 matches = []
548 548 prefix = hexnode[:length]
549 549 for rev in revs:
550 550 otherhexnode = repo[rev].hex()
551 551 if prefix == otherhexnode[:length]:
552 552 matches.append(otherhexnode)
553 553 if len(matches) == 1:
554 554 return disambiguate(prefix)
555 555
556 556 try:
557 557 return disambiguate(cl.shortest(node, minlength))
558 558 except error.LookupError:
559 559 raise error.RepoLookupError()
560 560
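# A sketch pairing the two prefix helpers above, assuming `repo` is a
# localrepo; minlength is illustrative. For an unambiguous prefix,
# shortesthexnodeidprefix() and resolvehexnodeidprefix() are inverses.
node = repo[b'tip'].node()
prefix = shortesthexnodeidprefix(repo, node, minlength=4)
assert resolvehexnodeidprefix(repo, prefix) == node
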
561 561 def isrevsymbol(repo, symbol):
562 562 """Checks if a symbol exists in the repo.
563 563
564 564 See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
565 565 symbol is an ambiguous nodeid prefix.
566 566 """
567 567 try:
568 568 revsymbol(repo, symbol)
569 569 return True
570 570 except error.RepoLookupError:
571 571 return False
572 572
573 573 def revsymbol(repo, symbol):
574 574 """Returns a context given a single revision symbol (as string).
575 575
576 576 This is similar to revsingle(), but accepts only a single revision symbol,
577 577 i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
578 578 not "max(public())".
579 579 """
580 580 if not isinstance(symbol, bytes):
581 581 msg = ("symbol (%s of type %s) was not a string, did you mean "
582 582 "repo[symbol]?" % (symbol, type(symbol)))
583 583 raise error.ProgrammingError(msg)
584 584 try:
585 585 if symbol in ('.', 'tip', 'null'):
586 586 return repo[symbol]
587 587
588 588 try:
589 589 r = int(symbol)
590 590 if '%d' % r != symbol:
591 591 raise ValueError
592 592 l = len(repo.changelog)
593 593 if r < 0:
594 594 r += l
595 595 if r < 0 or r >= l and r != wdirrev:
596 596 raise ValueError
597 597 return repo[r]
598 598 except error.FilteredIndexError:
599 599 raise
600 600 except (ValueError, OverflowError, IndexError):
601 601 pass
602 602
603 603 if len(symbol) == 40:
604 604 try:
605 605 node = bin(symbol)
606 606 rev = repo.changelog.rev(node)
607 607 return repo[rev]
608 608 except error.FilteredLookupError:
609 609 raise
610 610 except (TypeError, LookupError):
611 611 pass
612 612
613 613 # look up bookmarks through the name interface
614 614 try:
615 615 node = repo.names.singlenode(repo, symbol)
616 616 rev = repo.changelog.rev(node)
617 617 return repo[rev]
618 618 except KeyError:
619 619 pass
620 620
621 621 node = resolvehexnodeidprefix(repo, symbol)
622 622 if node is not None:
623 623 rev = repo.changelog.rev(node)
624 624 return repo[rev]
625 625
626 626 raise error.RepoLookupError(_("unknown revision '%s'") % symbol)
627 627
628 628 except error.WdirUnsupported:
629 629 return repo[None]
630 630 except (error.FilteredIndexError, error.FilteredLookupError,
631 631 error.FilteredRepoLookupError):
632 632 raise _filterederror(repo, symbol)
633 633
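# Examples of the symbol forms revsymbol() accepts, assuming the names
# exist in a hypothetical repo; each call returns a single changectx.
ctx = revsymbol(repo, b'.')            # working directory parent
ctx = revsymbol(repo, b'tip')
ctx = revsymbol(repo, b'1234')         # revision number
ctx = revsymbol(repo, b'c3f1ca2924c2') # hex node id prefix
ctx = revsymbol(repo, b'my-bookmark')  # bookmark/tag/branch via names
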
634 634 def _filterederror(repo, changeid):
635 635 """build an exception to be raised about a filtered changeid
636 636
637 637 This is extracted in a function to help extensions (eg: evolve) to
638 638 experiment with various message variants."""
639 639 if repo.filtername.startswith('visible'):
640 640
641 641 # Check if the changeset is obsolete
642 642 unfilteredrepo = repo.unfiltered()
643 643 ctx = revsymbol(unfilteredrepo, changeid)
644 644
645 645 # If the changeset is obsolete, enrich the message with the reason
646 646 # that made this changeset not visible
647 647 if ctx.obsolete():
648 648 msg = obsutil._getfilteredreason(repo, changeid, ctx)
649 649 else:
650 650 msg = _("hidden revision '%s'") % changeid
651 651
652 652 hint = _('use --hidden to access hidden revisions')
653 653
654 654 return error.FilteredRepoLookupError(msg, hint=hint)
655 655 msg = _("filtered revision '%s' (not in '%s' subset)")
656 656 msg %= (changeid, repo.filtername)
657 657 return error.FilteredRepoLookupError(msg)
658 658
659 659 def revsingle(repo, revspec, default='.', localalias=None):
660 660 if not revspec and revspec != 0:
661 661 return repo[default]
662 662
663 663 l = revrange(repo, [revspec], localalias=localalias)
664 664 if not l:
665 665 raise error.Abort(_('empty revision set'))
666 666 return repo[l.last()]
667 667
668 668 def _pairspec(revspec):
669 669 tree = revsetlang.parse(revspec)
670 670 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
671 671
672 672 def revpair(repo, revs):
673 673 if not revs:
674 674 return repo['.'], repo[None]
675 675
676 676 l = revrange(repo, revs)
677 677
678 678 if not l:
679 679 first = second = None
680 680 elif l.isascending():
681 681 first = l.min()
682 682 second = l.max()
683 683 elif l.isdescending():
684 684 first = l.max()
685 685 second = l.min()
686 686 else:
687 687 first = l.first()
688 688 second = l.last()
689 689
690 690 if first is None:
691 691 raise error.Abort(_('empty revision range'))
692 692 if (first == second and len(revs) >= 2
693 693 and not all(revrange(repo, [r]) for r in revs)):
694 694 raise error.Abort(_('empty revision on one side of range'))
695 695
696 696 # if top-level is range expression, the result must always be a pair
697 697 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
698 698 return repo[first], repo[None]
699 699
700 700 return repo[first], repo[second]
701 701
702 702 def revrange(repo, specs, localalias=None):
703 703 """Execute 1 to many revsets and return the union.
704 704
705 705 This is the preferred mechanism for executing revsets using user-specified
706 706 config options, such as revset aliases.
707 707
708 708 The revsets specified by ``specs`` will be executed via a chained ``OR``
709 709 expression. If ``specs`` is empty, an empty result is returned.
710 710
711 711 ``specs`` can contain integers, in which case they are assumed to be
712 712 revision numbers.
713 713
714 714 It is assumed the revsets are already formatted. If you have arguments
715 715 that need to be expanded in the revset, call ``revsetlang.formatspec()``
716 716 and pass the result as an element of ``specs``.
717 717
718 718 Specifying a single revset is allowed.
719 719
720 720 Returns a ``revset.abstractsmartset`` which is a list-like interface over
721 721 integer revisions.
722 722 """
723 723 allspecs = []
724 724 for spec in specs:
725 725 if isinstance(spec, int):
726 726 spec = revsetlang.formatspec('rev(%d)', spec)
727 727 allspecs.append(spec)
728 728 return repo.anyrevs(allspecs, user=True, localalias=localalias)
729 729
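# Calling-convention sketch from the docstring above: pre-format user
# arguments with revsetlang.formatspec(), then union the specs. The
# revsets here are illustrative.
spec = revsetlang.formatspec(b'ancestors(%s)', b'tip')
revs = revrange(repo, [spec, b'draft()'])  # smartset over both revsets
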
730 730 def meaningfulparents(repo, ctx):
731 731 """Return list of meaningful (or all if debug) parentrevs for rev.
732 732
733 733 For merges (two non-nullrev revisions) both parents are meaningful.
734 734 Otherwise the first parent revision is considered meaningful if it
735 735 is not the preceding revision.
736 736 """
737 737 parents = ctx.parents()
738 738 if len(parents) > 1:
739 739 return parents
740 740 if repo.ui.debugflag:
741 741 return [parents[0], repo[nullrev]]
742 742 if parents[0].rev() >= intrev(ctx) - 1:
743 743 return []
744 744 return parents
745 745
746 746 def expandpats(pats):
747 747 '''Expand bare globs when running on windows.
748 748 On posix we assume it has already been done by sh.'''
749 749 if not util.expandglobs:
750 750 return list(pats)
751 751 ret = []
752 752 for kindpat in pats:
753 753 kind, pat = matchmod._patsplit(kindpat, None)
754 754 if kind is None:
755 755 try:
756 756 globbed = glob.glob(pat)
757 757 except re.error:
758 758 globbed = [pat]
759 759 if globbed:
760 760 ret.extend(globbed)
761 761 continue
762 762 ret.append(kindpat)
763 763 return ret
764 764
765 765 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
766 766 badfn=None):
767 767 '''Return a matcher and the patterns that were used.
768 768 The matcher will warn about bad matches, unless an alternate badfn callback
769 769 is provided.'''
770 770 if pats == ("",):
771 771 pats = []
772 772 if opts is None:
773 773 opts = {}
774 774 if not globbed and default == 'relpath':
775 775 pats = expandpats(pats or [])
776 776
777 777 def bad(f, msg):
778 778 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
779 779
780 780 if badfn is None:
781 781 badfn = bad
782 782
783 783 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
784 784 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
785 785
786 786 if m.always():
787 787 pats = []
788 788 return m, pats
789 789
790 790 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
791 791 badfn=None):
792 792 '''Return a matcher that will warn about bad matches.'''
793 793 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
794 794
795 795 def matchall(repo):
796 796 '''Return a matcher that will efficiently match everything.'''
797 797 return matchmod.always(repo.root, repo.getcwd())
798 798
799 799 def matchfiles(repo, files, badfn=None):
800 800 '''Return a matcher that will efficiently match exactly these files.'''
801 801 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
802 802
803 803 def parsefollowlinespattern(repo, rev, pat, msg):
804 804 """Return a file name from `pat` pattern suitable for usage in followlines
805 805 logic.
806 806 """
807 807 if not matchmod.patkind(pat):
808 808 return pathutil.canonpath(repo.root, repo.getcwd(), pat)
809 809 else:
810 810 ctx = repo[rev]
811 811 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
812 812 files = [f for f in ctx if m(f)]
813 813 if len(files) != 1:
814 814 raise error.ParseError(msg)
815 815 return files[0]
816 816
817 817 def getorigvfs(ui, repo):
818 818 """return a vfs suitable to save 'orig' file
819 819
820 820 return None if no special directory is configured"""
821 821 origbackuppath = ui.config('ui', 'origbackuppath')
822 822 if not origbackuppath:
823 823 return None
824 824 return vfs.vfs(repo.wvfs.join(origbackuppath))
825 825
826 826 def origpath(ui, repo, filepath):
827 827 '''customize where .orig files are created
828 828
829 829 Fetch user defined path from config file: [ui] origbackuppath = <path>
830 830 Fall back to default (filepath with .orig suffix) if not specified
831 831 '''
832 832 origvfs = getorigvfs(ui, repo)
833 833 if origvfs is None:
834 834 return filepath + ".orig"
835 835
836 836 # Convert filepath from an absolute path into a path inside the repo.
837 837 filepathfromroot = util.normpath(os.path.relpath(filepath,
838 838 start=repo.root))
839 839
840 840 origbackupdir = origvfs.dirname(filepathfromroot)
841 841 if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
842 842 ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))
843 843
844 844 # Remove any files that conflict with the backup file's path
845 845 for f in reversed(list(util.finddirs(filepathfromroot))):
846 846 if origvfs.isfileorlink(f):
847 847 ui.note(_('removing conflicting file: %s\n')
848 848 % origvfs.join(f))
849 849 origvfs.unlink(f)
850 850 break
851 851
852 852 origvfs.makedirs(origbackupdir)
853 853
854 854 if origvfs.isdir(filepathfromroot) and not origvfs.islink(filepathfromroot):
855 855 ui.note(_('removing conflicting directory: %s\n')
856 856 % origvfs.join(filepathfromroot))
857 857 origvfs.rmtree(filepathfromroot, forcibly=True)
858 858
859 859 return origvfs.join(filepathfromroot)
860 860
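# A minimal sketch of the typical call pattern, assuming `filepath` is an
# absolute path inside the working directory: compute the backup location
# honoring [ui] origbackuppath, then move the original aside.
backup = origpath(ui, repo, filepath)
util.rename(filepath, backup)
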
861 861 class _containsnode(object):
862 862 """proxy __contains__(node) to container.__contains__ which accepts revs"""
863 863
864 864 def __init__(self, repo, revcontainer):
865 865 self._torev = repo.changelog.rev
866 866 self._revcontains = revcontainer.__contains__
867 867
868 868 def __contains__(self, node):
869 869 return self._revcontains(self._torev(node))
870 870
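# Example: membership tests by node against a container of revs, without
# converting every node up front; the revs here are illustrative.
hasnode = _containsnode(repo, {0, 1, 2})
found = repo[b'tip'].node() in hasnode
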
871 871 def cleanupnodes(repo, replacements, operation, moves=None, metadata=None,
872 872 fixphase=False, targetphase=None, backup=True):
873 873 """do common cleanups when old nodes are replaced by new nodes
874 874
875 875 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
876 876 (we might also want to move working directory parent in the future)
877 877
878 878 By default, bookmark moves are calculated automatically from 'replacements',
879 879 but 'moves' can be used to override that. Also, 'moves' may include
880 880 additional bookmark moves that should not have associated obsmarkers.
881 881
882 882 replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
883 883 have replacements. operation is a string, like "rebase".
884 884
885 885 metadata is a dictionary containing metadata to be stored in obsmarker if
886 886 obsolescence is enabled.
887 887 """
888 888 assert fixphase or targetphase is None
889 889 if not replacements and not moves:
890 890 return
891 891
892 892 # translate mapping's other forms
893 893 if not util.safehasattr(replacements, 'items'):
894 894 replacements = {(n,): () for n in replacements}
895 895 else:
896 896 # upgrading non-tuple "source" to tuple ones for BC
897 897 repls = {}
898 898 for key, value in replacements.items():
899 899 if not isinstance(key, tuple):
900 900 key = (key,)
901 901 repls[key] = value
902 902 replacements = repls
903 903
904 904 # Unfiltered repo is needed since nodes in replacements might be hidden.
905 905 unfi = repo.unfiltered()
906 906
907 907 # Calculate bookmark movements
908 908 if moves is None:
909 909 moves = {}
910 910 for oldnodes, newnodes in replacements.items():
911 911 for oldnode in oldnodes:
912 912 if oldnode in moves:
913 913 continue
914 914 if len(newnodes) > 1:
915 915 # usually a split, take the one with biggest rev number
916 916 newnode = next(unfi.set('max(%ln)', newnodes)).node()
917 917 elif len(newnodes) == 0:
918 918 # move bookmark backwards
919 919 allreplaced = []
920 920 for rep in replacements:
921 921 allreplaced.extend(rep)
922 922 roots = list(unfi.set('max((::%n) - %ln)', oldnode,
923 923 allreplaced))
924 924 if roots:
925 925 newnode = roots[0].node()
926 926 else:
927 927 newnode = nullid
928 928 else:
929 929 newnode = newnodes[0]
930 930 moves[oldnode] = newnode
931 931
932 932 allnewnodes = [n for ns in replacements.values() for n in ns]
933 933 toretract = {}
934 934 toadvance = {}
935 935 if fixphase:
936 936 precursors = {}
937 937 for oldnodes, newnodes in replacements.items():
938 938 for oldnode in oldnodes:
939 939 for newnode in newnodes:
940 940 precursors.setdefault(newnode, []).append(oldnode)
941 941
942 942 allnewnodes.sort(key=lambda n: unfi[n].rev())
943 943 newphases = {}
944 944 def phase(ctx):
945 945 return newphases.get(ctx.node(), ctx.phase())
946 946 for newnode in allnewnodes:
947 947 ctx = unfi[newnode]
948 948 parentphase = max(phase(p) for p in ctx.parents())
949 949 if targetphase is None:
950 950 oldphase = max(unfi[oldnode].phase()
951 951 for oldnode in precursors[newnode])
952 952 newphase = max(oldphase, parentphase)
953 953 else:
954 954 newphase = max(targetphase, parentphase)
955 955 newphases[newnode] = newphase
956 956 if newphase > ctx.phase():
957 957 toretract.setdefault(newphase, []).append(newnode)
958 958 elif newphase < ctx.phase():
959 959 toadvance.setdefault(newphase, []).append(newnode)
960 960
961 961 with repo.transaction('cleanup') as tr:
962 962 # Move bookmarks
963 963 bmarks = repo._bookmarks
964 964 bmarkchanges = []
965 965 for oldnode, newnode in moves.items():
966 966 oldbmarks = repo.nodebookmarks(oldnode)
967 967 if not oldbmarks:
968 968 continue
969 969 from . import bookmarks # avoid import cycle
970 970 repo.ui.debug('moving bookmarks %r from %s to %s\n' %
971 971 (pycompat.rapply(pycompat.maybebytestr, oldbmarks),
972 972 hex(oldnode), hex(newnode)))
973 973 # Delete divergent bookmarks being parents of related newnodes
974 974 deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
975 975 allnewnodes, newnode, oldnode)
976 976 deletenodes = _containsnode(repo, deleterevs)
977 977 for name in oldbmarks:
978 978 bmarkchanges.append((name, newnode))
979 979 for b in bookmarks.divergent2delete(repo, deletenodes, name):
980 980 bmarkchanges.append((b, None))
981 981
982 982 if bmarkchanges:
983 983 bmarks.applychanges(repo, tr, bmarkchanges)
984 984
985 985 for phase, nodes in toretract.items():
986 986 phases.retractboundary(repo, tr, phase, nodes)
987 987 for phase, nodes in toadvance.items():
988 988 phases.advanceboundary(repo, tr, phase, nodes)
989 989
990 990 # Obsolete or strip nodes
991 991 if obsolete.isenabled(repo, obsolete.createmarkersopt):
992 992 # If a node is already obsoleted, and we want to obsolete it
993 993 # without a successor, skip that obsolete request since it's
994 994 # unnecessary. That's the "if s or not isobs(n)" check below.
995 995 # Also sort the node in topology order, that might be useful for
996 996 # some obsstore logic.
997 997 # NOTE: the sorting might belong to createmarkers.
998 998 torev = unfi.changelog.rev
999 999 sortfunc = lambda ns: torev(ns[0][0])
1000 1000 rels = []
1001 1001 for ns, s in sorted(replacements.items(), key=sortfunc):
1002 1002 rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
1003 1003 rels.append(rel)
1004 1004 if rels:
1005 1005 obsolete.createmarkers(repo, rels, operation=operation,
1006 1006 metadata=metadata)
1007 1007 else:
1008 1008 from . import repair # avoid import cycle
1009 1009 tostrip = list(n for ns in replacements for n in ns)
1010 1010 if tostrip:
1011 1011 repair.delayedstrip(repo.ui, repo, tostrip, operation,
1012 1012 backup=backup)
1013 1013
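# Hedged examples of the `replacements` forms accepted above; the node
# variables are hypothetical. Tuple keys group old nodes rewritten into
# the same successors.
cleanupnodes(repo, {oldnode: [newnode]}, b'rebase')  # one-to-one rewrite
cleanupnodes(repo, {(old1, old2): [new1]}, b'fold')  # two folded into one
cleanupnodes(repo, [prunednode], b'prune')           # no successors
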
1014 1014 def addremove(repo, matcher, prefix, opts=None):
1015 1015 if opts is None:
1016 1016 opts = {}
1017 1017 m = matcher
1018 1018 dry_run = opts.get('dry_run')
1019 1019 try:
1020 1020 similarity = float(opts.get('similarity') or 0)
1021 1021 except ValueError:
1022 1022 raise error.Abort(_('similarity must be a number'))
1023 1023 if similarity < 0 or similarity > 100:
1024 1024 raise error.Abort(_('similarity must be between 0 and 100'))
1025 1025 similarity /= 100.0
1026 1026
1027 1027 ret = 0
1028 1028 join = lambda f: os.path.join(prefix, f)
1029 1029
1030 1030 wctx = repo[None]
1031 1031 for subpath in sorted(wctx.substate):
1032 1032 submatch = matchmod.subdirmatcher(subpath, m)
1033 1033 if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
1034 1034 sub = wctx.sub(subpath)
1035 1035 try:
1036 1036 if sub.addremove(submatch, prefix, opts):
1037 1037 ret = 1
1038 1038 except error.LookupError:
1039 1039 repo.ui.status(_("skipping missing subrepository: %s\n")
1040 1040 % join(subpath))
1041 1041
1042 1042 rejected = []
1043 1043 def badfn(f, msg):
1044 1044 if f in m.files():
1045 1045 m.bad(f, msg)
1046 1046 rejected.append(f)
1047 1047
1048 1048 badmatch = matchmod.badmatch(m, badfn)
1049 1049 added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
1050 1050 badmatch)
1051 1051
1052 1052 unknownset = set(unknown + forgotten)
1053 1053 toprint = unknownset.copy()
1054 1054 toprint.update(deleted)
1055 1055 for abs in sorted(toprint):
1056 1056 if repo.ui.verbose or not m.exact(abs):
1057 1057 if abs in unknownset:
1058 1058 status = _('adding %s\n') % m.uipath(abs)
1059 1059 label = 'ui.addremove.added'
1060 1060 else:
1061 1061 status = _('removing %s\n') % m.uipath(abs)
1062 1062 label = 'ui.addremove.removed'
1063 1063 repo.ui.status(status, label=label)
1064 1064
1065 1065 renames = _findrenames(repo, m, added + unknown, removed + deleted,
1066 1066 similarity)
1067 1067
1068 1068 if not dry_run:
1069 1069 _markchanges(repo, unknown + forgotten, deleted, renames)
1070 1070
1071 1071 for f in rejected:
1072 1072 if f in m.files():
1073 1073 return 1
1074 1074 return ret
1075 1075
1076 1076 def marktouched(repo, files, similarity=0.0):
1077 1077 '''Assert that files have somehow been operated upon. files are relative to
1078 1078 the repo root.'''
1079 1079 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
1080 1080 rejected = []
1081 1081
1082 1082 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
1083 1083
1084 1084 if repo.ui.verbose:
1085 1085 unknownset = set(unknown + forgotten)
1086 1086 toprint = unknownset.copy()
1087 1087 toprint.update(deleted)
1088 1088 for abs in sorted(toprint):
1089 1089 if abs in unknownset:
1090 1090 status = _('adding %s\n') % abs
1091 1091 else:
1092 1092 status = _('removing %s\n') % abs
1093 1093 repo.ui.status(status)
1094 1094
1095 1095 renames = _findrenames(repo, m, added + unknown, removed + deleted,
1096 1096 similarity)
1097 1097
1098 1098 _markchanges(repo, unknown + forgotten, deleted, renames)
1099 1099
1100 1100 for f in rejected:
1101 1101 if f in m.files():
1102 1102 return 1
1103 1103 return 0
1104 1104
1105 1105 def _interestingfiles(repo, matcher):
1106 1106 '''Walk dirstate with matcher, looking for files that addremove would care
1107 1107 about.
1108 1108
1109 1109 This is different from dirstate.status because it doesn't care about
1110 1110 whether files are modified or clean.'''
1111 1111 added, unknown, deleted, removed, forgotten = [], [], [], [], []
1112 1112 audit_path = pathutil.pathauditor(repo.root, cached=True)
1113 1113
1114 1114 ctx = repo[None]
1115 1115 dirstate = repo.dirstate
1116 1116 matcher = repo.narrowmatch(matcher, includeexact=True)
1117 1117 walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
1118 1118 unknown=True, ignored=False, full=False)
1119 1119 for abs, st in walkresults.iteritems():
1120 1120 dstate = dirstate[abs]
1121 1121 if dstate == '?' and audit_path.check(abs):
1122 1122 unknown.append(abs)
1123 1123 elif dstate != 'r' and not st:
1124 1124 deleted.append(abs)
1125 1125 elif dstate == 'r' and st:
1126 1126 forgotten.append(abs)
1127 1127 # for finding renames
1128 1128 elif dstate == 'r' and not st:
1129 1129 removed.append(abs)
1130 1130 elif dstate == 'a':
1131 1131 added.append(abs)
1132 1132
1133 1133 return added, unknown, deleted, removed, forgotten
1134 1134
1135 1135 def _findrenames(repo, matcher, added, removed, similarity):
1136 1136 '''Find renames from removed files to added ones.'''
1137 1137 renames = {}
1138 1138 if similarity > 0:
1139 1139 for old, new, score in similar.findrenames(repo, added, removed,
1140 1140 similarity):
1141 1141 if (repo.ui.verbose or not matcher.exact(old)
1142 1142 or not matcher.exact(new)):
1143 1143 repo.ui.status(_('recording removal of %s as rename to %s '
1144 1144 '(%d%% similar)\n') %
1145 1145 (matcher.rel(old), matcher.rel(new),
1146 1146 score * 100))
1147 1147 renames[new] = old
1148 1148 return renames
1149 1149
1150 1150 def _markchanges(repo, unknown, deleted, renames):
1151 1151 '''Marks the files in unknown as added, the files in deleted as removed,
1152 1152 and the files in renames as copied.'''
1153 1153 wctx = repo[None]
1154 1154 with repo.wlock():
1155 1155 wctx.forget(deleted)
1156 1156 wctx.add(unknown)
1157 1157 for new, old in renames.iteritems():
1158 1158 wctx.copy(old, new)
1159 1159
1160 1160 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1161 1161 """Update the dirstate to reflect the intent of copying src to dst. For
1162 1162 different reasons it might not end with dst being marked as copied from src.
1163 1163 """
1164 1164 origsrc = repo.dirstate.copied(src) or src
1165 1165 if dst == origsrc: # copying back a copy?
1166 1166 if repo.dirstate[dst] not in 'mn' and not dryrun:
1167 1167 repo.dirstate.normallookup(dst)
1168 1168 else:
1169 1169 if repo.dirstate[origsrc] == 'a' and origsrc == src:
1170 1170 if not ui.quiet:
1171 1171 ui.warn(_("%s has not been committed yet, so no copy "
1172 1172 "data will be stored for %s.\n")
1173 1173 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
1174 1174 if repo.dirstate[dst] in '?r' and not dryrun:
1175 1175 wctx.add([dst])
1176 1176 elif not dryrun:
1177 1177 wctx.copy(origsrc, dst)
1178 1178
1179 1179 def writerequires(opener, requirements):
1180 1180 with opener('requires', 'w', atomictemp=True) as fp:
1181 1181 for r in sorted(requirements):
1182 1182 fp.write("%s\n" % r)
1183 1183
1184 1184 class filecachesubentry(object):
1185 1185 def __init__(self, path, stat):
1186 1186 self.path = path
1187 1187 self.cachestat = None
1188 1188 self._cacheable = None
1189 1189
1190 1190 if stat:
1191 1191 self.cachestat = filecachesubentry.stat(self.path)
1192 1192
1193 1193 if self.cachestat:
1194 1194 self._cacheable = self.cachestat.cacheable()
1195 1195 else:
1196 1196 # None means we don't know yet
1197 1197 self._cacheable = None
1198 1198
1199 1199 def refresh(self):
1200 1200 if self.cacheable():
1201 1201 self.cachestat = filecachesubentry.stat(self.path)
1202 1202
1203 1203 def cacheable(self):
1204 1204 if self._cacheable is not None:
1205 1205 return self._cacheable
1206 1206
1207 1207 # we don't know yet, assume it is for now
1208 1208 return True
1209 1209
1210 1210 def changed(self):
1211 1211 # no point in going further if we can't cache it
1212 1212 if not self.cacheable():
1213 1213 return True
1214 1214
1215 1215 newstat = filecachesubentry.stat(self.path)
1216 1216
1217 1217 # we may not know if it's cacheable yet, check again now
1218 1218 if newstat and self._cacheable is None:
1219 1219 self._cacheable = newstat.cacheable()
1220 1220
1221 1221 # check again
1222 1222 if not self._cacheable:
1223 1223 return True
1224 1224
1225 1225 if self.cachestat != newstat:
1226 1226 self.cachestat = newstat
1227 1227 return True
1228 1228 else:
1229 1229 return False
1230 1230
1231 1231 @staticmethod
1232 1232 def stat(path):
1233 1233 try:
1234 1234 return util.cachestat(path)
1235 1235 except OSError as e:
1236 1236 if e.errno != errno.ENOENT:
1237 1237 raise
1238 1238
1239 1239 class filecacheentry(object):
1240 1240 def __init__(self, paths, stat=True):
1241 1241 self._entries = []
1242 1242 for path in paths:
1243 1243 self._entries.append(filecachesubentry(path, stat))
1244 1244
1245 1245 def changed(self):
1246 1246 '''true if any entry has changed'''
1247 1247 for entry in self._entries:
1248 1248 if entry.changed():
1249 1249 return True
1250 1250 return False
1251 1251
1252 1252 def refresh(self):
1253 1253 for entry in self._entries:
1254 1254 entry.refresh()
1255 1255
1256 1256 class filecache(object):
1257 1257 """A property like decorator that tracks files under .hg/ for updates.
1258 1258
1259 1259 On first access, the files defined as arguments are stat()ed and the
1260 1260 results cached. The decorated function is called. The results are stashed
1261 1261 away in a ``_filecache`` dict on the object whose method is decorated.
1262 1262
1263 1263 On subsequent access, the cached result is used as it is set to the
1264 1264 instance dictionary.
1265 1265
1266 1266 On external property set/delete operations, the caller must update the
1267 1267 corresponding _filecache entry appropriately. Use __class__.<attr>.set()
1268 1268 instead of directly setting <attr>.
1269 1269
1270 1270 When using the property API, the cached data is always used if available.
1271 1271 No stat() is performed to check if the file has changed.
1272 1272
1273 1273 Others can muck about with the state of the ``_filecache`` dict. e.g. they
1274 1274 can populate an entry before the property's getter is called. In this case,
1275 1275 entries in ``_filecache`` will be used during property operations,
1276 1276 if available. If the underlying file changes, it is up to external callers
1277 1277 to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
1278 1278 method result as well as possibly calling ``del obj._filecache[attr]`` to
1279 1279 remove the ``filecacheentry``.
1280 1280 """
1281 1281
1282 1282 def __init__(self, *paths):
1283 1283 self.paths = paths
1284 1284
1285 1285 def join(self, obj, fname):
1286 1286 """Used to compute the runtime path of a cached file.
1287 1287
1288 1288 Users should subclass filecache and provide their own version of this
1289 1289 function to call the appropriate join function on 'obj' (an instance
1290 1290 of the class that its member function was decorated).
1291 1291 """
1292 1292 raise NotImplementedError
1293 1293
1294 1294 def __call__(self, func):
1295 1295 self.func = func
1296 1296 self.sname = func.__name__
1297 1297 self.name = pycompat.sysbytes(self.sname)
1298 1298 return self
1299 1299
1300 1300 def __get__(self, obj, type=None):
1301 1301 # if accessed on the class, return the descriptor itself.
1302 1302 if obj is None:
1303 1303 return self
1304 1304
1305 1305 assert self.sname not in obj.__dict__
1306 1306
1307 1307 entry = obj._filecache.get(self.name)
1308 1308
1309 1309 if entry:
1310 1310 if entry.changed():
1311 1311 entry.obj = self.func(obj)
1312 1312 else:
1313 1313 paths = [self.join(obj, path) for path in self.paths]
1314 1314
1315 1315 # We stat -before- creating the object so our cache doesn't lie if
1316 1316 # a writer modified the file between the time we read and stat it
1317 1317 entry = filecacheentry(paths, True)
1318 1318 entry.obj = self.func(obj)
1319 1319
1320 1320 obj._filecache[self.name] = entry
1321 1321
1322 1322 obj.__dict__[self.sname] = entry.obj
1323 1323 return entry.obj
1324 1324
1325 1325 # don't implement __set__(), which would make __dict__ lookup as slow as
1326 1326 # function call.
1327 1327
1328 1328 def set(self, obj, value):
1329 1329 if self.name not in obj._filecache:
1330 1330 # we add an entry for the missing value because X in __dict__
1331 1331 # implies X in _filecache
1332 1332 paths = [self.join(obj, path) for path in self.paths]
1333 1333 ce = filecacheentry(paths, False)
1334 1334 obj._filecache[self.name] = ce
1335 1335 else:
1336 1336 ce = obj._filecache[self.name]
1337 1337
1338 1338 ce.obj = value # update cached copy
1339 1339 obj.__dict__[self.sname] = value # update copy returned by obj.x
1340 1340
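# A sketch of the subclassing contract from the docstring above: supply
# join() so the decorator can locate the tracked file. The class names and
# the parse helper are hypothetical.
class repofilecache(filecache):
    def join(self, obj, fname):
        return obj.vfs.join(fname)

class somerepo(object):
    def __init__(self, vfs):
        self.vfs = vfs
        self._filecache = {}

    @repofilecache(b'bookmarks')
    def bookmarks(self):
        # re-read and parse only when the bookmarks file changes on disk
        return parsebookmarks(self.vfs.read(b'bookmarks'))
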
1341 1341 def extdatasource(repo, source):
1342 1342 """Gather a map of rev -> value dict from the specified source
1343 1343
1344 1344 A source spec is treated as a URL, with a special case shell: type
1345 1345 for parsing the output from a shell command.
1346 1346
1347 1347 The data is parsed as a series of newline-separated records where
1348 1348 each record is a revision specifier optionally followed by a space
1349 1349 and a freeform string value. If the revision is known locally, it
1350 1350 is converted to a rev, otherwise the record is skipped.
1351 1351
1352 1352 Note that both key and value are treated as UTF-8 and converted to
1353 1353 the local encoding. This allows uniformity between local and
1354 1354 remote data sources.
1355 1355 """
1356 1356
1357 1357 spec = repo.ui.config("extdata", source)
1358 1358 if not spec:
1359 1359 raise error.Abort(_("unknown extdata source '%s'") % source)
1360 1360
1361 1361 data = {}
1362 1362 src = proc = None
1363 1363 try:
1364 1364 if spec.startswith("shell:"):
1365 1365 # external commands should be run relative to the repo root
1366 1366 cmd = spec[6:]
1367 1367 proc = subprocess.Popen(procutil.tonativestr(cmd),
1368 1368 shell=True, bufsize=-1,
1369 1369 close_fds=procutil.closefds,
1370 1370 stdout=subprocess.PIPE,
1371 1371 cwd=procutil.tonativestr(repo.root))
1372 1372 src = proc.stdout
1373 1373 else:
1374 1374 # treat as a URL or file
1375 1375 src = url.open(repo.ui, spec)
1376 1376 for l in src:
1377 1377 if " " in l:
1378 1378 k, v = l.strip().split(" ", 1)
1379 1379 else:
1380 1380 k, v = l.strip(), ""
1381 1381
1382 1382 k = encoding.tolocal(k)
1383 1383 try:
1384 1384 data[revsingle(repo, k).rev()] = encoding.tolocal(v)
1385 1385 except (error.LookupError, error.RepoLookupError):
1386 1386 pass # we ignore data for nodes that don't exist locally
1387 1387 finally:
1388 1388 if proc:
1389 1389 proc.communicate()
1390 1390 if src:
1391 1391 src.close()
1392 1392 if proc and proc.returncode != 0:
1393 1393 raise error.Abort(_("extdata command '%s' failed: %s")
1394 1394 % (cmd, procutil.explainexit(proc.returncode)))
1395 1395
1396 1396 return data
1397 1397
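# Configuration sketch for the source spec described above (the section
# entry and file contents are hypothetical):
#   [extdata]
#   bugzilla = shell:cat .hg/bugmap
# where .hg/bugmap holds lines like "deadbeef 1234"; revisions unknown to
# the local repo are silently skipped.
data = extdatasource(repo, b'bugzilla')  # -> {rev: b'1234', ...}
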
1398 1398 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1399 1399 if lock is None:
1400 1400 raise error.LockInheritanceContractViolation(
1401 1401 'lock can only be inherited while held')
1402 1402 if environ is None:
1403 1403 environ = {}
1404 1404 with lock.inherit() as locker:
1405 1405 environ[envvar] = locker
1406 1406 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1407 1407
1408 1408 def wlocksub(repo, cmd, *args, **kwargs):
1409 1409 """run cmd as a subprocess that allows inheriting repo's wlock
1410 1410
1411 1411 This can only be called while the wlock is held. This takes all the
1412 1412 arguments that ui.system does, and returns the exit code of the
1413 1413 subprocess."""
1414 1414 return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
1415 1415 **kwargs)
1416 1416
1417 1417 class progress(object):
1418 1418 def __init__(self, ui, topic, unit="", total=None):
1419 1419 self.ui = ui
1420 1420 self.pos = 0
1421 1421 self.topic = topic
1422 1422 self.unit = unit
1423 1423 self.total = total
1424 1424 self.debug = ui.configbool('progress', 'debug')
1425 1425
1426 1426 def __enter__(self):
1427 1427 return self
1428 1428
1429 1429 def __exit__(self, exc_type, exc_value, exc_tb):
1430 1430 self.complete()
1431 1431
1432 1432 def update(self, pos, item="", total=None):
1433 1433 assert pos is not None
1434 1434 if total:
1435 1435 self.total = total
1436 1436 self.pos = pos
1437 self._print(item)
1437 self._updatebar(item)
1438 if self.debug:
1439 self._printdebug(item)
1438 1440
1439 1441 def increment(self, step=1, item="", total=None):
1440 1442 self.update(self.pos + step, item, total)
1441 1443
1442 1444 def complete(self):
1443 1445 self.pos = None
1444 1446 self.unit = ""
1445 1447 self.total = None
1446 self._print("")
1448 self._updatebar("")
1447 1449
1448 def _print(self, item):
1450 def _updatebar(self, item):
1449 1451 if getattr(self.ui._fmsgerr, 'structured', False):
1450 1452 # channel for machine-readable output with metadata, just send
1451 1453 # raw information
1452 1454 # TODO: consider porting some useful information (e.g. estimated
1453 1455 # time) from progbar. we might want to support update delay to
1454 1456 # reduce the cost of transferring progress messages.
1455 1457 self.ui._fmsgerr.write(None, type=b'progress', topic=self.topic,
1456 1458 pos=self.pos, item=item, unit=self.unit,
1457 1459 total=self.total)
1458 1460 elif self.ui._progbar is not None:
1459 1461 self.ui._progbar.progress(self.topic, self.pos, item=item,
1460 1462 unit=self.unit, total=self.total)
1461 1463
1462 if self.pos is None or not self.debug:
1463 return
1464
1464 def _printdebug(self, item):
1465 1465 if self.unit:
1466 1466 unit = ' ' + self.unit
1467 1467 if item:
1468 1468 item = ' ' + item
1469 1469
1470 1470 if self.total:
1471 1471 pct = 100.0 * self.pos / self.total
1472 1472 self.ui.debug('%s:%s %d/%d%s (%4.2f%%)\n'
1473 1473 % (self.topic, item, self.pos, self.total, unit, pct))
1474 1474 else:
1475 1475 self.ui.debug('%s:%s %d%s\n' % (self.topic, item, self.pos, unit))
1476 1476
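# Usage sketch for the progress helper this changeset refactors into
# _updatebar() and _printdebug(); topic, unit and `files` are illustrative.
with progress(ui, b'scanning', unit=b'files', total=len(files)) as prog:
    for i, f in enumerate(files):
        prog.update(i, item=f)
        # ... per-file work; with progress.debug set, each update is
        # also written via ui.debug() ...
# __exit__ calls complete(), which clears the bar
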
1477 1477 def gdinitconfig(ui):
1478 1478 """helper function to know if a repo should be created as general delta
1479 1479 """
1480 1480 # experimental config: format.generaldelta
1481 1481 return (ui.configbool('format', 'generaldelta')
1482 1482 or ui.configbool('format', 'usegeneraldelta'))
1483 1483
1484 1484 def gddeltaconfig(ui):
1485 1485 """helper function to know if incoming delta should be optimised
1486 1486 """
1487 1487 # experimental config: format.generaldelta
1488 1488 return ui.configbool('format', 'generaldelta')
1489 1489
1490 1490 class simplekeyvaluefile(object):
1491 1491 """A simple file with key=value lines
1492 1492
1493 1493 Keys must be alphanumerics and start with a letter, values must not
1494 1494 contain '\n' characters"""
1495 1495 firstlinekey = '__firstline'
1496 1496
1497 1497 def __init__(self, vfs, path, keys=None):
1498 1498 self.vfs = vfs
1499 1499 self.path = path
1500 1500
1501 1501 def read(self, firstlinenonkeyval=False):
1502 1502 """Read the contents of a simple key-value file
1503 1503
1504 1504 'firstlinenonkeyval' indicates whether the first line of file should
1505 1505 be treated as a key-value pair or reuturned fully under the
1506 1506 __firstline key."""
1507 1507 lines = self.vfs.readlines(self.path)
1508 1508 d = {}
1509 1509 if firstlinenonkeyval:
1510 1510 if not lines:
1511 1511 e = _("empty simplekeyvalue file")
1512 1512 raise error.CorruptedState(e)
1513 1513 # we don't want to include '\n' in the __firstline
1514 1514 d[self.firstlinekey] = lines[0][:-1]
1515 1515 del lines[0]
1516 1516
1517 1517 try:
1518 1518 # the 'if line.strip()' part prevents us from failing on empty
1519 1519 # lines which only contain '\n' and therefore are not skipped
1520 1520 # by 'if line'
1521 1521 updatedict = dict(line[:-1].split('=', 1) for line in lines
1522 1522 if line.strip())
1523 1523 if self.firstlinekey in updatedict:
1524 1524 e = _("%r can't be used as a key")
1525 1525 raise error.CorruptedState(e % self.firstlinekey)
1526 1526 d.update(updatedict)
1527 1527 except ValueError as e:
1528 1528 raise error.CorruptedState(str(e))
1529 1529 return d
1530 1530
1531 1531 def write(self, data, firstline=None):
1532 1532 """Write key=>value mapping to a file
1533 1533 data is a dict. Keys must be alphanumerical and start with a letter.
1534 1534 Values must not contain newline characters.
1535 1535
1536 1536 If 'firstline' is not None, it is written to file before
1537 1537 everything else, as it is, not in a key=value form"""
1538 1538 lines = []
1539 1539 if firstline is not None:
1540 1540 lines.append('%s\n' % firstline)
1541 1541
1542 1542 for k, v in data.items():
1543 1543 if k == self.firstlinekey:
1544 1544 e = "key name '%s' is reserved" % self.firstlinekey
1545 1545 raise error.ProgrammingError(e)
1546 1546 if not k[0:1].isalpha():
1547 1547 e = "keys must start with a letter in a key-value file"
1548 1548 raise error.ProgrammingError(e)
1549 1549 if not k.isalnum():
1550 1550 e = "invalid key name in a simple key-value file"
1551 1551 raise error.ProgrammingError(e)
1552 1552 if '\n' in v:
1553 1553 e = "invalid value in a simple key-value file"
1554 1554 raise error.ProgrammingError(e)
1555 1555 lines.append("%s=%s\n" % (k, v))
1556 1556 with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
1557 1557 fp.write(''.join(lines))
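# A minimal usage sketch (illustrative only: 'anyvfs' stands for any vfs
# instance, e.g. repo.vfs, and 'state' is a hypothetical file name):
#   f = simplekeyvaluefile(anyvfs, 'state')
#   f.write({'version': '1'}, firstline='header')
#   # the file now contains "header\nversion=1\n"
#   f.read(firstlinenonkeyval=True)
#   # -> {'__firstline': 'header', 'version': '1'}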
1558 1558
1559 1559 _reportobsoletedsource = [
1560 1560 'debugobsolete',
1561 1561 'pull',
1562 1562 'push',
1563 1563 'serve',
1564 1564 'unbundle',
1565 1565 ]
1566 1566
1567 1567 _reportnewcssource = [
1568 1568 'pull',
1569 1569 'unbundle',
1570 1570 ]
1571 1571
1572 1572 def prefetchfiles(repo, revs, match):
1573 1573 """Invokes the registered file prefetch functions, allowing extensions to
1574 1574 ensure the corresponding files are available locally, before the command
1575 1575 uses them."""
1576 1576 if match:
1577 1577 # The command itself will complain about files that don't exist, so
1578 1578 # don't duplicate the message.
1579 1579 match = matchmod.badmatch(match, lambda fn, msg: None)
1580 1580 else:
1581 1581 match = matchall(repo)
1582 1582
1583 1583 fileprefetchhooks(repo, revs, match)
1584 1584
1585 1585 # a list of (repo, revs, match) prefetch functions
1586 1586 fileprefetchhooks = util.hooks()
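# Sketch of how an extension might hook in ('_myprefetch' and the '-myext'
# source name are hypothetical; util.hooks.add() takes a source name and a
# callable):
#   def _myprefetch(repo, revs, match):
#       pass  # fetch the matched files for 'revs' into the local store
#   fileprefetchhooks.add('-myext', _myprefetch)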
1587 1587
1588 1588 # A marker that tells the evolve extension to suppress its own reporting
1589 1589 _reportstroubledchangesets = True
1590 1590
1591 1591 def registersummarycallback(repo, otr, txnname=''):
1592 1592 """register a callback to issue a summary after the transaction is closed
1593 1593 """
1594 1594 def txmatch(sources):
1595 1595 return any(txnname.startswith(source) for source in sources)
1596 1596
1597 1597 categories = []
1598 1598
1599 1599 def reportsummary(func):
1600 1600 """decorator for report callbacks."""
1601 1601 # The repoview life cycle is shorter than that of the actual
1602 1602 # underlying repository, so the filtered object can die before the
1603 1603 # weakref is used, leading to trouble. We keep a reference to the
1604 1604 # unfiltered object and restore the filtering when retrieving the
1605 1605 # repository through the weakref.
1606 1606 filtername = repo.filtername
1607 1607 reporef = weakref.ref(repo.unfiltered())
1608 1608 def wrapped(tr):
1609 1609 repo = reporef()
1610 1610 if filtername:
1611 1611 repo = repo.filtered(filtername)
1612 1612 func(repo, tr)
1613 1613 newcat = '%02i-txnreport' % len(categories)
1614 1614 otr.addpostclose(newcat, wrapped)
1615 1615 categories.append(newcat)
1616 1616 return wrapped
1617 1617
1618 1618 if txmatch(_reportobsoletedsource):
1619 1619 @reportsummary
1620 1620 def reportobsoleted(repo, tr):
1621 1621 obsoleted = obsutil.getobsoleted(repo, tr)
1622 1622 if obsoleted:
1623 1623 repo.ui.status(_('obsoleted %i changesets\n')
1624 1624 % len(obsoleted))
1625 1625
1626 1626 if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
1627 1627 repo.ui.configbool('experimental', 'evolution.report-instabilities')):
1628 1628 instabilitytypes = [
1629 1629 ('orphan', 'orphan'),
1630 1630 ('phase-divergent', 'phasedivergent'),
1631 1631 ('content-divergent', 'contentdivergent'),
1632 1632 ]
1633 1633
1634 1634 def getinstabilitycounts(repo):
1635 1635 filtered = repo.changelog.filteredrevs
1636 1636 counts = {}
1637 1637 for instability, revset in instabilitytypes:
1638 1638 counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
1639 1639 filtered)
1640 1640 return counts
1641 1641
1642 1642 oldinstabilitycounts = getinstabilitycounts(repo)
1643 1643 @reportsummary
1644 1644 def reportnewinstabilities(repo, tr):
1645 1645 newinstabilitycounts = getinstabilitycounts(repo)
1646 1646 for instability, revset in instabilitytypes:
1647 1647 delta = (newinstabilitycounts[instability] -
1648 1648 oldinstabilitycounts[instability])
1649 1649 msg = getinstabilitymessage(delta, instability)
1650 1650 if msg:
1651 1651 repo.ui.warn(msg)
1652 1652
1653 1653 if txmatch(_reportnewcssource):
1654 1654 @reportsummary
1655 1655 def reportnewcs(repo, tr):
1656 1656 """Report the range of new revisions pulled/unbundled."""
1657 1657 origrepolen = tr.changes.get('origrepolen', len(repo))
1658 1658 unfi = repo.unfiltered()
1659 1659 if origrepolen >= len(unfi):
1660 1660 return
1661 1661
1662 1662 # Compute the bounds of new visible revisions' range.
1663 1663 revs = smartset.spanset(repo, start=origrepolen)
1664 1664 if revs:
1665 1665 minrev, maxrev = repo[revs.min()], repo[revs.max()]
1666 1666
1667 1667 if minrev == maxrev:
1668 1668 revrange = minrev
1669 1669 else:
1670 1670 revrange = '%s:%s' % (minrev, maxrev)
1671 1671 draft = len(repo.revs('%ld and draft()', revs))
1672 1672 secret = len(repo.revs('%ld and secret()', revs))
1673 1673 if not (draft or secret):
1674 1674 msg = _('new changesets %s\n') % revrange
1675 1675 elif draft and secret:
1676 1676 msg = _('new changesets %s (%d drafts, %d secrets)\n')
1677 1677 msg %= (revrange, draft, secret)
1678 1678 elif draft:
1679 1679 msg = _('new changesets %s (%d drafts)\n')
1680 1680 msg %= (revrange, draft)
1681 1681 elif secret:
1682 1682 msg = _('new changesets %s (%d secrets)\n')
1683 1683 msg %= (revrange, secret)
1684 1684 else:
1685 1685 errormsg = 'entered unreachable condition'
1686 1686 raise error.ProgrammingError(errormsg)
1687 1687 repo.ui.status(msg)
1688 1688
1689 1689 # search new changesets directly pulled as obsolete
1690 1690 duplicates = tr.changes.get('revduplicates', ())
1691 1691 obsadded = unfi.revs('(%d: + %ld) and obsolete()',
1692 1692 origrepolen, duplicates)
1693 1693 cl = repo.changelog
1694 1694 extinctadded = [r for r in obsadded if r not in cl]
1695 1695 if extinctadded:
1696 1696 # They are not just obsolete, but obsolete and invisible;
1697 1697 # we call them "extinct" internally but the term has not been
1698 1698 # exposed to users.
1699 1699 msg = '(%d other changesets obsolete on arrival)\n'
1700 1700 repo.ui.status(msg % len(extinctadded))
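# Illustrative status output from reportnewcs() (hashes made up):
#   new changesets 4be42f1a55d0:9a3c5e71b2f4 (2 drafts)
#   (3 other changesets obsolete on arrival)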
1701 1701
1702 1702 @reportsummary
1703 1703 def reportphasechanges(repo, tr):
1704 1704 """Report statistics of phase changes for changesets pre-existing
1705 1705 pull/unbundle.
1706 1706 """
1707 1707 origrepolen = tr.changes.get('origrepolen', len(repo))
1708 1708 phasetracking = tr.changes.get('phases', {})
1709 1709 if not phasetracking:
1710 1710 return
1711 1711 published = [
1712 1712 rev for rev, (old, new) in phasetracking.iteritems()
1713 1713 if new == phases.public and rev < origrepolen
1714 1714 ]
1715 1715 if not published:
1716 1716 return
1717 1717 repo.ui.status(_('%d local changesets published\n')
1718 1718 % len(published))
1719 1719
1720 1720 def getinstabilitymessage(delta, instability):
1721 1721 """function to return the message to show warning about new instabilities
1722 1722
1723 1723 exists as a separate function so that extension can wrap to show more
1724 1724 information like how to fix instabilities"""
1725 1725 if delta > 0:
1726 1726 return _('%i new %s changesets\n') % (delta, instability)
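# e.g. getinstabilitymessage(2, 'orphan') -> '2 new orphan changesets\n';
# a zero or negative delta yields None, so no warning is printed.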
1727 1727
1728 1728 def nodesummaries(repo, nodes, maxnumnodes=4):
1729 1729 if len(nodes) <= maxnumnodes or repo.ui.verbose:
1730 1730 return ' '.join(short(h) for h in nodes)
1731 1731 first = ' '.join(short(h) for h in nodes[:maxnumnodes])
1732 1732 return _("%s and %d others") % (first, len(nodes) - maxnumnodes)
1733 1733
1734 1734 def enforcesinglehead(repo, tr, desc):
1735 1735 """check that no named branch has multiple heads"""
1736 1736 if desc in ('strip', 'repair'):
1737 1737 # skip the logic during strip
1738 1738 return
1739 1739 visible = repo.filtered('visible')
1740 1740 # possible improvement: we could restrict the check to affected branch
1741 1741 for name, heads in visible.branchmap().iteritems():
1742 1742 if len(heads) > 1:
1743 1743 msg = _('rejecting multiple heads on branch "%s"')
1744 1744 msg %= name
1745 1745 hint = _('%d heads: %s')
1746 1746 hint %= (len(heads), nodesummaries(repo, heads))
1747 1747 raise error.Abort(msg, hint=hint)
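# A hedged wiring sketch (not the actual call site): a repo could register
# this as a transaction validator so the transaction aborts before multiple
# heads are committed:
#   tr.addvalidator('000-single-head',
#                   lambda tr2: enforcesinglehead(repo, tr2, desc))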
1748 1748
1749 1749 def wrapconvertsink(sink):
1750 1750 """Allow extensions to wrap the sink returned by convcmd.convertsink()
1751 1751 before it is used, whether or not the convert extension was formally loaded.
1752 1752 """
1753 1753 return sink
1754 1754
1755 1755 def unhidehashlikerevs(repo, specs, hiddentype):
1756 1756 """parse the user specs and unhide changesets whose hash or revision number
1757 1757 is passed.
1758 1758
1759 1759 hiddentype can be: 1) 'warn': warn while unhiding changesets
1760 1760 2) 'nowarn': don't warn while unhiding changesets
1761 1761
1762 1762 returns a repo object with the required changesets unhidden
1763 1763 """
1764 1764 if not repo.filtername or not repo.ui.configbool('experimental',
1765 1765 'directaccess'):
1766 1766 return repo
1767 1767
1768 1768 if repo.filtername not in ('visible', 'visible-hidden'):
1769 1769 return repo
1770 1770
1771 1771 symbols = set()
1772 1772 for spec in specs:
1773 1773 try:
1774 1774 tree = revsetlang.parse(spec)
1775 1775 except error.ParseError: # will be reported by scmutil.revrange()
1776 1776 continue
1777 1777
1778 1778 symbols.update(revsetlang.gethashlikesymbols(tree))
1779 1779
1780 1780 if not symbols:
1781 1781 return repo
1782 1782
1783 1783 revs = _getrevsfromsymbols(repo, symbols)
1784 1784
1785 1785 if not revs:
1786 1786 return repo
1787 1787
1788 1788 if hiddentype == 'warn':
1789 1789 unfi = repo.unfiltered()
1790 1790 revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
1791 1791 repo.ui.warn(_("warning: accessing hidden changesets for write "
1792 1792 "operation: %s\n") % revstr)
1793 1793
1794 1794 # we have to use a new filtername to separate the branch/tags caches until
1795 1795 # we can disable these caches when revisions are dynamically pinned.
1796 1796 return repo.filtered('visible-hidden', revs)
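# Typical flow (sketch; 'specs' is whatever the user passed on the command
# line): unhide hash-like symbols before resolving the revsets, then let
# revrange() do the actual parsing and error reporting:
#   repo = unhidehashlikerevs(repo, specs, 'warn')
#   revs = revrange(repo, specs)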
1797 1797
1798 1798 def _getrevsfromsymbols(repo, symbols):
1799 1799 """parse the list of symbols and returns a set of revision numbers of hidden
1800 1800 changesets present in symbols"""
1801 1801 revs = set()
1802 1802 unfi = repo.unfiltered()
1803 1803 unficl = unfi.changelog
1804 1804 cl = repo.changelog
1805 1805 tiprev = len(unficl)
1806 1806 allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
1807 1807 for s in symbols:
1808 1808 try:
1809 1809 n = int(s)
1810 1810 if n <= tiprev:
1811 1811 if not allowrevnums:
1812 1812 continue
1813 1813 else:
1814 1814 if n not in cl:
1815 1815 revs.add(n)
1816 1816 continue
1817 1817 except ValueError:
1818 1818 pass
1819 1819
1820 1820 try:
1821 1821 s = resolvehexnodeidprefix(unfi, s)
1822 1822 except (error.LookupError, error.WdirUnsupported):
1823 1823 s = None
1824 1824
1825 1825 if s is not None:
1826 1826 rev = unficl.rev(s)
1827 1827 if rev not in cl:
1828 1828 revs.add(rev)
1829 1829
1830 1830 return revs
1831 1831
1832 1832 def bookmarkrevs(repo, mark):
1833 1833 """
1834 1834 Select revisions reachable by a given bookmark
1835 1835 """
1836 1836 return repo.revs("ancestors(bookmark(%s)) - "
1837 1837 "ancestors(head() and not bookmark(%s)) - "
1838 1838 "ancestors(bookmark() and not bookmark(%s))",
1839 1839 mark, mark, mark)
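# In revset terms this selects the changesets "owned" by the bookmark: its
# ancestors, minus anything also reachable from another head or another
# bookmark. Usage sketch ('feature' is an example bookmark name):
#   revs = bookmarkrevs(repo, 'feature')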