scmutil: expand long "one-liner"...
Boris Feld
r39926:1c3f1491 default
@@ -1,1774 +1,1776 @@
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import hashlib
13 13 import os
14 14 import re
15 15 import socket
16 16 import subprocess
17 17 import weakref
18 18
19 19 from .i18n import _
20 20 from .node import (
21 21 bin,
22 22 hex,
23 23 nullid,
24 24 short,
25 25 wdirid,
26 26 wdirrev,
27 27 )
28 28
29 29 from . import (
30 30 encoding,
31 31 error,
32 32 match as matchmod,
33 33 obsolete,
34 34 obsutil,
35 35 pathutil,
36 36 phases,
37 37 policy,
38 38 pycompat,
39 39 revsetlang,
40 40 similar,
41 41 url,
42 42 util,
43 43 vfs,
44 44 )
45 45
46 46 from .utils import (
47 47 procutil,
48 48 stringutil,
49 49 )
50 50
51 51 if pycompat.iswindows:
52 52 from . import scmwindows as scmplatform
53 53 else:
54 54 from . import scmposix as scmplatform
55 55
56 56 parsers = policy.importmod(r'parsers')
57 57
58 58 termsize = scmplatform.termsize
59 59
60 60 class status(tuple):
61 61 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
62 62 and 'ignored' properties are only relevant to the working copy.
63 63 '''
64 64
65 65 __slots__ = ()
66 66
67 67 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
68 68 clean):
69 69 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
70 70 ignored, clean))
71 71
72 72 @property
73 73 def modified(self):
74 74 '''files that have been modified'''
75 75 return self[0]
76 76
77 77 @property
78 78 def added(self):
79 79 '''files that have been added'''
80 80 return self[1]
81 81
82 82 @property
83 83 def removed(self):
84 84 '''files that have been removed'''
85 85 return self[2]
86 86
87 87 @property
88 88 def deleted(self):
89 89 '''files that are in the dirstate, but have been deleted from the
90 90 working copy (aka "missing")
91 91 '''
92 92 return self[3]
93 93
94 94 @property
95 95 def unknown(self):
96 96 '''files not in the dirstate that are not ignored'''
97 97 return self[4]
98 98
99 99 @property
100 100 def ignored(self):
101 101 '''files not in the dirstate that are ignored (by _dirignore())'''
102 102 return self[5]
103 103
104 104 @property
105 105 def clean(self):
106 106 '''files that have not been modified'''
107 107 return self[6]
108 108
109 109 def __repr__(self, *args, **kwargs):
110 110 return ((r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
111 111 r'unknown=%s, ignored=%s, clean=%s>') %
112 112 tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self))
113 113
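# Illustrative aside (not part of this changeset): the status class above
# uses a "tuple subclass with named properties" pattern. A minimal,
# runnable sketch of the same idea, with hypothetical field names:

class pair(tuple):
    '''Immutable 2-tuple that also exposes its slots as named properties.'''
    __slots__ = ()

    def __new__(cls, first, second):
        return tuple.__new__(cls, (first, second))

    @property
    def first(self):
        return self[0]

    @property
    def second(self):
        return self[1]

# usage: p = pair(1, 2); p.first == p[0] and p.second == p[1]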
114 114 def itersubrepos(ctx1, ctx2):
115 115 """find subrepos in ctx1 or ctx2"""
116 116 # Create a (subpath, ctx) mapping where we prefer subpaths from
117 117 # ctx1. The subpaths from ctx2 are important when the .hgsub file
118 118 # has been modified (in ctx2) but not yet committed (in ctx1).
119 119 subpaths = dict.fromkeys(ctx2.substate, ctx2)
120 120 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
121 121
122 122 missing = set()
123 123
124 124 for subpath in ctx2.substate:
125 125 if subpath not in ctx1.substate:
126 126 del subpaths[subpath]
127 127 missing.add(subpath)
128 128
129 129 for subpath, ctx in sorted(subpaths.iteritems()):
130 130 yield subpath, ctx.sub(subpath)
131 131
132 132 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
133 133 # status and diff will have an accurate result when it does
134 134 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
135 135 # against itself.
136 136 for subpath in missing:
137 137 yield subpath, ctx2.nullsub(subpath, ctx1)
138 138
139 139 def nochangesfound(ui, repo, excluded=None):
140 140 '''Report no changes for push/pull. 'excluded' is None or a list of
141 141 nodes excluded from the push/pull.
142 142 '''
143 143 secretlist = []
144 144 if excluded:
145 145 for n in excluded:
146 146 ctx = repo[n]
147 147 if ctx.phase() >= phases.secret and not ctx.extinct():
148 148 secretlist.append(n)
149 149
150 150 if secretlist:
151 151 ui.status(_("no changes found (ignored %d secret changesets)\n")
152 152 % len(secretlist))
153 153 else:
154 154 ui.status(_("no changes found\n"))
155 155
156 156 def callcatch(ui, func):
157 157 """call func() with global exception handling
158 158
159 159 Return func() if no exception happens. Otherwise do some error handling
160 160 and return an exit code accordingly. Does not handle all exceptions.
161 161 """
162 162 try:
163 163 try:
164 164 return func()
165 165 except: # re-raises
166 166 ui.traceback()
167 167 raise
168 168 # Global exception handling, alphabetically
169 169 # Mercurial-specific first, followed by built-in and library exceptions
170 170 except error.LockHeld as inst:
171 171 if inst.errno == errno.ETIMEDOUT:
172 172 reason = _('timed out waiting for lock held by %r') % inst.locker
173 173 else:
174 174 reason = _('lock held by %r') % inst.locker
175 175 ui.error(_("abort: %s: %s\n") % (
176 176 inst.desc or stringutil.forcebytestr(inst.filename), reason))
177 177 if not inst.locker:
178 178 ui.error(_("(lock might be very busy)\n"))
179 179 except error.LockUnavailable as inst:
180 180 ui.error(_("abort: could not lock %s: %s\n") %
181 181 (inst.desc or stringutil.forcebytestr(inst.filename),
182 182 encoding.strtolocal(inst.strerror)))
183 183 except error.OutOfBandError as inst:
184 184 if inst.args:
185 185 msg = _("abort: remote error:\n")
186 186 else:
187 187 msg = _("abort: remote error\n")
188 188 ui.error(msg)
189 189 if inst.args:
190 190 ui.error(''.join(inst.args))
191 191 if inst.hint:
192 192 ui.error('(%s)\n' % inst.hint)
193 193 except error.RepoError as inst:
194 194 ui.error(_("abort: %s!\n") % inst)
195 195 if inst.hint:
196 196 ui.error(_("(%s)\n") % inst.hint)
197 197 except error.ResponseError as inst:
198 198 ui.error(_("abort: %s") % inst.args[0])
199 199 msg = inst.args[1]
200 200 if isinstance(msg, type(u'')):
201 201 msg = pycompat.sysbytes(msg)
202 202 if not isinstance(msg, bytes):
203 203 ui.error(" %r\n" % (msg,))
204 204 elif not msg:
205 205 ui.error(_(" empty string\n"))
206 206 else:
207 207 ui.error("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
208 208 except error.CensoredNodeError as inst:
209 209 ui.error(_("abort: file censored %s!\n") % inst)
210 210 except error.StorageError as inst:
211 211 ui.error(_("abort: %s!\n") % inst)
212 212 except error.InterventionRequired as inst:
213 213 ui.error("%s\n" % inst)
214 214 if inst.hint:
215 215 ui.error(_("(%s)\n") % inst.hint)
216 216 return 1
217 217 except error.WdirUnsupported:
218 218 ui.error(_("abort: working directory revision cannot be specified\n"))
219 219 except error.Abort as inst:
220 220 ui.error(_("abort: %s\n") % inst)
221 221 if inst.hint:
222 222 ui.error(_("(%s)\n") % inst.hint)
223 223 except ImportError as inst:
224 224 ui.error(_("abort: %s!\n") % stringutil.forcebytestr(inst))
225 225 m = stringutil.forcebytestr(inst).split()[-1]
226 226 if m in "mpatch bdiff".split():
227 227 ui.error(_("(did you forget to compile extensions?)\n"))
228 228 elif m in "zlib".split():
229 229 ui.error(_("(is your Python install correct?)\n"))
230 230 except IOError as inst:
231 231 if util.safehasattr(inst, "code"):
232 232 ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst))
233 233 elif util.safehasattr(inst, "reason"):
234 234 try: # usually it is in the form (errno, strerror)
235 235 reason = inst.reason.args[1]
236 236 except (AttributeError, IndexError):
237 237 # it might be anything, for example a string
238 238 reason = inst.reason
239 239 if isinstance(reason, pycompat.unicode):
240 240 # SSLError of Python 2.7.9 contains a unicode
241 241 reason = encoding.unitolocal(reason)
242 242 ui.error(_("abort: error: %s\n") % reason)
243 243 elif (util.safehasattr(inst, "args")
244 244 and inst.args and inst.args[0] == errno.EPIPE):
245 245 pass
246 246 elif getattr(inst, "strerror", None):
247 247 if getattr(inst, "filename", None):
248 248 ui.error(_("abort: %s: %s\n") % (
249 249 encoding.strtolocal(inst.strerror),
250 250 stringutil.forcebytestr(inst.filename)))
251 251 else:
252 252 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
253 253 else:
254 254 raise
255 255 except OSError as inst:
256 256 if getattr(inst, "filename", None) is not None:
257 257 ui.error(_("abort: %s: '%s'\n") % (
258 258 encoding.strtolocal(inst.strerror),
259 259 stringutil.forcebytestr(inst.filename)))
260 260 else:
261 261 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
262 262 except MemoryError:
263 263 ui.error(_("abort: out of memory\n"))
264 264 except SystemExit as inst:
265 265 # Commands shouldn't sys.exit directly, but give a return code.
266 266 # Just in case, catch this and pass the exit code to the caller.
267 267 return inst.code
268 268 except socket.error as inst:
269 269 ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))
270 270
271 271 return -1
272 272
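# Illustrative aside (not part of this changeset): callcatch() above maps a
# hierarchy of exceptions to exit codes. A loose, stdlib-only sketch of the
# same shape (the function name and codes here are hypothetical):

def callcatch_sketch(func):
    '''Run func(); translate known failures into an exit code.'''
    try:
        return func()
    except SystemExit as inst:
        # mirror the real code: pass the requested exit code to the caller
        return inst.code
    except MemoryError:
        print('abort: out of memory')
        return -1
    except OSError as inst:
        print('abort: %s' % inst.strerror)
        return -1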
273 273 def checknewlabel(repo, lbl, kind):
274 274 # Do not use the "kind" parameter in ui output.
275 275 # It makes strings difficult to translate.
276 276 if lbl in ['tip', '.', 'null']:
277 277 raise error.Abort(_("the name '%s' is reserved") % lbl)
278 278 for c in (':', '\0', '\n', '\r'):
279 279 if c in lbl:
280 280 raise error.Abort(
281 281 _("%r cannot be used in a name") % pycompat.bytestr(c))
282 282 try:
283 283 int(lbl)
284 284 raise error.Abort(_("cannot use an integer as a name"))
285 285 except ValueError:
286 286 pass
287 287 if lbl.strip() != lbl:
288 288 raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
289 289
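# Illustrative aside (not part of this changeset): checknewlabel() above
# rejects reserved names, control characters, pure integers, and
# leading/trailing whitespace. A standalone sketch of the same checks:

def checklabel_sketch(lbl):
    if lbl in ('tip', '.', 'null'):
        raise ValueError("the name '%s' is reserved" % lbl)
    for c in (':', '\0', '\n', '\r'):
        if c in lbl:
            raise ValueError('%r cannot be used in a name' % c)
    try:
        int(lbl)
    except ValueError:
        pass
    else:
        raise ValueError('cannot use an integer as a name')
    if lbl.strip() != lbl:
        raise ValueError('leading or trailing whitespace in name %r' % lbl)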
290 290 def checkfilename(f):
291 291 '''Check that the filename f is an acceptable filename for a tracked file'''
292 292 if '\r' in f or '\n' in f:
293 293 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
294 294 % pycompat.bytestr(f))
295 295
296 296 def checkportable(ui, f):
297 297 '''Check if filename f is portable and warn or abort depending on config'''
298 298 checkfilename(f)
299 299 abort, warn = checkportabilityalert(ui)
300 300 if abort or warn:
301 301 msg = util.checkwinfilename(f)
302 302 if msg:
303 303 msg = "%s: %s" % (msg, procutil.shellquote(f))
304 304 if abort:
305 305 raise error.Abort(msg)
306 306 ui.warn(_("warning: %s\n") % msg)
307 307
308 308 def checkportabilityalert(ui):
309 309 '''check if the user's config requests nothing, a warning, or an abort for
310 310 non-portable filenames'''
311 311 val = ui.config('ui', 'portablefilenames')
312 312 lval = val.lower()
313 313 bval = stringutil.parsebool(val)
314 314 abort = pycompat.iswindows or lval == 'abort'
315 315 warn = bval or lval == 'warn'
316 316 if bval is None and not (warn or abort or lval == 'ignore'):
317 317 raise error.ConfigError(
318 318 _("ui.portablefilenames value is invalid ('%s')") % val)
319 319 return abort, warn
320 320
321 321 class casecollisionauditor(object):
322 322 def __init__(self, ui, abort, dirstate):
323 323 self._ui = ui
324 324 self._abort = abort
325 325 allfiles = '\0'.join(dirstate._map)
326 326 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
327 327 self._dirstate = dirstate
328 328 # The purpose of _newfiles is so that we don't complain about
329 329 # case collisions if someone were to call this object with the
330 330 # same filename twice.
331 331 self._newfiles = set()
332 332
333 333 def __call__(self, f):
334 334 if f in self._newfiles:
335 335 return
336 336 fl = encoding.lower(f)
337 337 if fl in self._loweredfiles and f not in self._dirstate:
338 338 msg = _('possible case-folding collision for %s') % f
339 339 if self._abort:
340 340 raise error.Abort(msg)
341 341 self._ui.warn(_("warning: %s\n") % msg)
342 342 self._loweredfiles.add(fl)
343 343 self._newfiles.add(f)
344 344
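# Illustrative aside (not part of this changeset): the auditor above keeps a
# set of case-folded names and flags any new name whose folded form is
# already taken. A standalone sketch of the core check, using str.lower()
# in place of encoding.lower():

def findcasecollisions(existing, newfiles):
    '''Yield names from newfiles that case-fold onto an existing name.'''
    lowered = {f.lower() for f in existing}
    for f in newfiles:
        fl = f.lower()
        if fl in lowered and f not in existing:
            yield f
        lowered.add(fl)

# list(findcasecollisions({'README'}, ['readme'])) == ['readme']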
345 345 def filteredhash(repo, maxrev):
346 346 """build hash of filtered revisions in the current repoview.
347 347
348 348 Multiple caches perform up-to-date validation by checking that the
349 349 tiprev and tipnode stored in the cache file match the current repository.
350 350 However, this is not sufficient for validating repoviews because the set
351 351 of revisions in the view may change without the repository tiprev and
352 352 tipnode changing.
353 353
354 354 This function hashes all the revs filtered from the view and returns
355 355 that SHA-1 digest.
356 356 """
357 357 cl = repo.changelog
358 358 if not cl.filteredrevs:
359 359 return None
360 360 key = None
361 361 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
362 362 if revs:
363 363 s = hashlib.sha1()
364 364 for rev in revs:
365 365 s.update('%d;' % rev)
366 366 key = s.digest()
367 367 return key
368 368
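# Illustrative aside (not part of this changeset): the cache key built by
# filteredhash() above is a SHA-1 over the decimal revs, each terminated by
# ';'. A standalone sketch with plain integers standing in for filtered
# revisions:

import hashlib

def filteredhash_sketch(filteredrevs, maxrev):
    revs = sorted(r for r in filteredrevs if r <= maxrev)
    if not revs:
        return None
    s = hashlib.sha1()
    for rev in revs:
        s.update(b'%d;' % rev)
    return s.digest()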
369 369 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
370 370 '''yield every hg repository under path, always recursively.
371 371 The recurse flag will only control recursion into repo working dirs.'''
372 372 def errhandler(err):
373 373 if err.filename == path:
374 374 raise err
375 375 samestat = getattr(os.path, 'samestat', None)
376 376 if followsym and samestat is not None:
377 377 def adddir(dirlst, dirname):
378 378 dirstat = os.stat(dirname)
379 379 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
380 380 if not match:
381 381 dirlst.append(dirstat)
382 382 return not match
383 383 else:
384 384 followsym = False
385 385
386 386 if (seen_dirs is None) and followsym:
387 387 seen_dirs = []
388 388 adddir(seen_dirs, path)
389 389 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
390 390 dirs.sort()
391 391 if '.hg' in dirs:
392 392 yield root # found a repository
393 393 qroot = os.path.join(root, '.hg', 'patches')
394 394 if os.path.isdir(os.path.join(qroot, '.hg')):
395 395 yield qroot # we have a patch queue repo here
396 396 if recurse:
397 397 # avoid recursing inside the .hg directory
398 398 dirs.remove('.hg')
399 399 else:
400 400 dirs[:] = [] # don't descend further
401 401 elif followsym:
402 402 newdirs = []
403 403 for d in dirs:
404 404 fname = os.path.join(root, d)
405 405 if adddir(seen_dirs, fname):
406 406 if os.path.islink(fname):
407 407 for hgname in walkrepos(fname, True, seen_dirs):
408 408 yield hgname
409 409 else:
410 410 newdirs.append(d)
411 411 dirs[:] = newdirs
412 412
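# Illustrative aside (not part of this changeset): a stripped-down version
# of the scan above, without symlink or patch-queue handling, showing how a
# '.hg' directory marks a repository and stops the descent:

import os

def walkrepos_sketch(path):
    for root, dirs, _files in os.walk(path, topdown=True):
        dirs.sort()
        if '.hg' in dirs:
            yield root
            dirs[:] = []  # do not descend into a found repository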
413 413 def binnode(ctx):
414 414 """Return binary node id for a given basectx"""
415 415 node = ctx.node()
416 416 if node is None:
417 417 return wdirid
418 418 return node
419 419
420 420 def intrev(ctx):
421 421 """Return integer for a given basectx that can be used in comparison or
422 422 arithmetic operation"""
423 423 rev = ctx.rev()
424 424 if rev is None:
425 425 return wdirrev
426 426 return rev
427 427
428 428 def formatchangeid(ctx):
429 429 """Format changectx as '{rev}:{node|formatnode}', which is the default
430 430 template provided by logcmdutil.changesettemplater"""
431 431 repo = ctx.repo()
432 432 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
433 433
434 434 def formatrevnode(ui, rev, node):
435 435 """Format given revision and node depending on the current verbosity"""
436 436 if ui.debugflag:
437 437 hexfunc = hex
438 438 else:
439 439 hexfunc = short
440 440 return '%d:%s' % (rev, hexfunc(node))
441 441
442 442 def resolvehexnodeidprefix(repo, prefix):
443 443 if (prefix.startswith('x') and
444 444 repo.ui.configbool('experimental', 'revisions.prefixhexnode')):
445 445 prefix = prefix[1:]
446 446 try:
447 447 # Uses unfiltered repo because it's faster when prefix is ambiguous.
448 448 # This matches the shortesthexnodeidprefix() function below.
449 449 node = repo.unfiltered().changelog._partialmatch(prefix)
450 450 except error.AmbiguousPrefixLookupError:
451 451 revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
452 452 if revset:
453 453 # Clear config to avoid infinite recursion
454 454 configoverrides = {('experimental',
455 455 'revisions.disambiguatewithin'): None}
456 456 with repo.ui.configoverride(configoverrides):
457 457 revs = repo.anyrevs([revset], user=True)
458 458 matches = []
459 459 for rev in revs:
460 460 node = repo.changelog.node(rev)
461 461 if hex(node).startswith(prefix):
462 462 matches.append(node)
463 463 if len(matches) == 1:
464 464 return matches[0]
465 465 raise
466 466 if node is None:
467 467 return
468 468 repo.changelog.rev(node) # make sure node isn't filtered
469 469 return node
470 470
471 471 def mayberevnum(repo, prefix):
472 472 """Checks if the given prefix may be mistaken for a revision number"""
473 473 try:
474 474 i = int(prefix)
475 475 # if we are a pure int, then starting with zero will not be
476 476 # confused with a rev; nor, obviously, will an int larger
477 477 # than the value of the tip rev
478 478 if prefix[0:1] == b'0' or i >= len(repo):
479 479 return False
480 480 return True
481 481 except ValueError:
482 482 return False
483 483
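# Illustrative aside (not part of this changeset): mayberevnum() above,
# with the repository length passed in explicitly so the rule is testable
# on its own. A prefix can only be mistaken for a revision number if it
# parses as an int, has no leading zero, and is below the repo length:

def mayberevnum_sketch(prefix, repolen):
    try:
        i = int(prefix)
    except ValueError:
        return False
    return not prefix.startswith('0') and i < repolen

# mayberevnum_sketch('12', 100) is True; '012' and '123' (repolen 100) are not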
484 484 def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
485 485 """Find the shortest unambiguous prefix that matches hexnode.
486 486
487 487 If "cache" is not None, it must be a dictionary that can be used for
488 488 caching between calls to this method.
489 489 """
490 490 # _partialmatch() of filtered changelog could take O(len(repo)) time,
491 491 # which would be unacceptably slow, so we look for hash collisions in
492 492 # unfiltered space, which means some hashes may be slightly longer.
493 493
494 494 def disambiguate(prefix):
495 495 """Disambiguate against revnums."""
496 496 if repo.ui.configbool('experimental', 'revisions.prefixhexnode'):
497 497 if mayberevnum(repo, prefix):
498 498 return 'x' + prefix
499 499 else:
500 500 return prefix
501 501
502 502 hexnode = hex(node)
503 503 for length in range(len(prefix), len(hexnode) + 1):
504 504 prefix = hexnode[:length]
505 505 if not mayberevnum(repo, prefix):
506 506 return prefix
507 507
508 508 cl = repo.unfiltered().changelog
509 509 revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
510 510 if revset:
511 511 revs = None
512 512 if cache is not None:
513 513 revs = cache.get('disambiguationrevset')
514 514 if revs is None:
515 515 revs = repo.anyrevs([revset], user=True)
516 516 if cache is not None:
517 517 cache['disambiguationrevset'] = revs
518 518 if cl.rev(node) in revs:
519 519 hexnode = hex(node)
520 520 nodetree = None
521 521 if cache is not None:
522 522 nodetree = cache.get('disambiguationnodetree')
523 523 if not nodetree:
524 524 try:
525 525 nodetree = parsers.nodetree(cl.index, len(revs))
526 526 except AttributeError:
527 527 # no native nodetree
528 528 pass
529 529 else:
530 530 for r in revs:
531 531 nodetree.insert(r)
532 532 if cache is not None:
533 533 cache['disambiguationnodetree'] = nodetree
534 534 if nodetree is not None:
535 535 length = max(nodetree.shortest(node), minlength)
536 536 prefix = hexnode[:length]
537 537 return disambiguate(prefix)
538 538 for length in range(minlength, len(hexnode) + 1):
539 539 matches = []
540 540 prefix = hexnode[:length]
541 541 for rev in revs:
542 542 otherhexnode = repo[rev].hex()
543 543 if prefix == otherhexnode[:length]:
544 544 matches.append(otherhexnode)
545 545 if len(matches) == 1:
546 546 return disambiguate(prefix)
547 547
548 548 try:
549 549 return disambiguate(cl.shortest(node, minlength))
550 550 except error.LookupError:
551 551 raise error.RepoLookupError()
552 552
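# Illustrative aside (not part of this changeset): the fallback branch of
# disambiguate() above extends a hex prefix until it can no longer be read
# as a revision number. A standalone sketch of that loop:

def disambiguate_sketch(hexnode, minlength, repolen):
    '''Extend a hex prefix until it cannot be mistaken for a rev number.'''
    for length in range(minlength, len(hexnode) + 1):
        prefix = hexnode[:length]
        try:
            i = int(prefix)
        except ValueError:
            return prefix  # contains a hex letter: unambiguous
        if prefix.startswith('0') or i >= repolen:
            return prefix  # cannot be read as a revision number
    return hexnode

# disambiguate_sketch('1234abcd', 1, 100) == '123'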
553 553 def isrevsymbol(repo, symbol):
554 554 """Checks if a symbol exists in the repo.
555 555
556 556 See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
557 557 symbol is an ambiguous nodeid prefix.
558 558 """
559 559 try:
560 560 revsymbol(repo, symbol)
561 561 return True
562 562 except error.RepoLookupError:
563 563 return False
564 564
565 565 def revsymbol(repo, symbol):
566 566 """Returns a context given a single revision symbol (as string).
567 567
568 568 This is similar to revsingle(), but accepts only a single revision symbol,
569 569 i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
570 570 not "max(public())".
571 571 """
572 572 if not isinstance(symbol, bytes):
573 573 msg = ("symbol (%s of type %s) was not a string, did you mean "
574 574 "repo[symbol]?" % (symbol, type(symbol)))
575 575 raise error.ProgrammingError(msg)
576 576 try:
577 577 if symbol in ('.', 'tip', 'null'):
578 578 return repo[symbol]
579 579
580 580 try:
581 581 r = int(symbol)
582 582 if '%d' % r != symbol:
583 583 raise ValueError
584 584 l = len(repo.changelog)
585 585 if r < 0:
586 586 r += l
587 587 if r < 0 or r >= l and r != wdirrev:
588 588 raise ValueError
589 589 return repo[r]
590 590 except error.FilteredIndexError:
591 591 raise
592 592 except (ValueError, OverflowError, IndexError):
593 593 pass
594 594
595 595 if len(symbol) == 40:
596 596 try:
597 597 node = bin(symbol)
598 598 rev = repo.changelog.rev(node)
599 599 return repo[rev]
600 600 except error.FilteredLookupError:
601 601 raise
602 602 except (TypeError, LookupError):
603 603 pass
604 604
605 605 # look up bookmarks through the name interface
606 606 try:
607 607 node = repo.names.singlenode(repo, symbol)
608 608 rev = repo.changelog.rev(node)
609 609 return repo[rev]
610 610 except KeyError:
611 611 pass
612 612
613 613 node = resolvehexnodeidprefix(repo, symbol)
614 614 if node is not None:
615 615 rev = repo.changelog.rev(node)
616 616 return repo[rev]
617 617
618 618 raise error.RepoLookupError(_("unknown revision '%s'") % symbol)
619 619
620 620 except error.WdirUnsupported:
621 621 return repo[None]
622 622 except (error.FilteredIndexError, error.FilteredLookupError,
623 623 error.FilteredRepoLookupError):
624 624 raise _filterederror(repo, symbol)
625 625
626 626 def _filterederror(repo, changeid):
627 627 """build an exception to be raised about a filtered changeid
628 628
629 629 This is extracted in a function to help extensions (eg: evolve) to
630 630 experiment with various message variants."""
631 631 if repo.filtername.startswith('visible'):
632 632
633 633 # Check if the changeset is obsolete
634 634 unfilteredrepo = repo.unfiltered()
635 635 ctx = revsymbol(unfilteredrepo, changeid)
636 636
637 637 # If the changeset is obsolete, enrich the message with the reason
638 638 # that made this changeset not visible
639 639 if ctx.obsolete():
640 640 msg = obsutil._getfilteredreason(repo, changeid, ctx)
641 641 else:
642 642 msg = _("hidden revision '%s'") % changeid
643 643
644 644 hint = _('use --hidden to access hidden revisions')
645 645
646 646 return error.FilteredRepoLookupError(msg, hint=hint)
647 647 msg = _("filtered revision '%s' (not in '%s' subset)")
648 648 msg %= (changeid, repo.filtername)
649 649 return error.FilteredRepoLookupError(msg)
650 650
651 651 def revsingle(repo, revspec, default='.', localalias=None):
652 652 if not revspec and revspec != 0:
653 653 return repo[default]
654 654
655 655 l = revrange(repo, [revspec], localalias=localalias)
656 656 if not l:
657 657 raise error.Abort(_('empty revision set'))
658 658 return repo[l.last()]
659 659
660 660 def _pairspec(revspec):
661 661 tree = revsetlang.parse(revspec)
662 662 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
663 663
664 664 def revpair(repo, revs):
665 665 if not revs:
666 666 return repo['.'], repo[None]
667 667
668 668 l = revrange(repo, revs)
669 669
670 670 if not l:
671 671 first = second = None
672 672 elif l.isascending():
673 673 first = l.min()
674 674 second = l.max()
675 675 elif l.isdescending():
676 676 first = l.max()
677 677 second = l.min()
678 678 else:
679 679 first = l.first()
680 680 second = l.last()
681 681
682 682 if first is None:
683 683 raise error.Abort(_('empty revision range'))
684 684 if (first == second and len(revs) >= 2
685 685 and not all(revrange(repo, [r]) for r in revs)):
686 686 raise error.Abort(_('empty revision on one side of range'))
687 687
688 688 # if top-level is range expression, the result must always be a pair
689 689 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
690 690 return repo[first], repo[None]
691 691
692 692 return repo[first], repo[second]
693 693
694 694 def revrange(repo, specs, localalias=None):
695 695 """Execute 1 to many revsets and return the union.
696 696
697 697 This is the preferred mechanism for executing revsets using user-specified
698 698 config options, such as revset aliases.
699 699
700 700 The revsets specified by ``specs`` will be executed via a chained ``OR``
701 701 expression. If ``specs`` is empty, an empty result is returned.
702 702
703 703 ``specs`` can contain integers, in which case they are assumed to be
704 704 revision numbers.
705 705
706 706 It is assumed the revsets are already formatted. If you have arguments
707 707 that need to be expanded in the revset, call ``revsetlang.formatspec()``
708 708 and pass the result as an element of ``specs``.
709 709
710 710 Specifying a single revset is allowed.
711 711
712 712 Returns a ``revset.abstractsmartset`` which is a list-like interface over
713 713 integer revisions.
714 714 """
715 715 allspecs = []
716 716 for spec in specs:
717 717 if isinstance(spec, int):
718 718 spec = revsetlang.formatspec('rev(%d)', spec)
719 719 allspecs.append(spec)
720 720 return repo.anyrevs(allspecs, user=True, localalias=localalias)
721 721
722 722 def meaningfulparents(repo, ctx):
723 723 """Return list of meaningful (or all if debug) parentrevs for rev.
724 724
725 725 For merges (two non-nullrev revisions) both parents are meaningful.
726 726 Otherwise the first parent revision is considered meaningful if it
727 727 is not the preceding revision.
728 728 """
729 729 parents = ctx.parents()
730 730 if len(parents) > 1:
731 731 return parents
732 732 if repo.ui.debugflag:
733 733 return [parents[0], repo['null']]
734 734 if parents[0].rev() >= intrev(ctx) - 1:
735 735 return []
736 736 return parents
737 737
738 738 def expandpats(pats):
739 739 '''Expand bare globs when running on windows.
740 740 On posix we assume it has already been done by sh.'''
741 741 if not util.expandglobs:
742 742 return list(pats)
743 743 ret = []
744 744 for kindpat in pats:
745 745 kind, pat = matchmod._patsplit(kindpat, None)
746 746 if kind is None:
747 747 try:
748 748 globbed = glob.glob(pat)
749 749 except re.error:
750 750 globbed = [pat]
751 751 if globbed:
752 752 ret.extend(globbed)
753 753 continue
754 754 ret.append(kindpat)
755 755 return ret
756 756
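# Illustrative aside (not part of this changeset): the Windows-only glob
# expansion above, reduced to its core and skipping pattern-kind and
# re.error handling. Patterns that match nothing are kept verbatim,
# mirroring common shell behavior:

import glob

def expandpats_sketch(pats):
    ret = []
    for pat in pats:
        globbed = glob.glob(pat)
        if globbed:
            ret.extend(globbed)
        else:
            ret.append(pat)
    return ret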
757 757 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
758 758 badfn=None):
759 759 '''Return a matcher and the patterns that were used.
760 760 The matcher will warn about bad matches, unless an alternate badfn callback
761 761 is provided.'''
762 762 if pats == ("",):
763 763 pats = []
764 764 if opts is None:
765 765 opts = {}
766 766 if not globbed and default == 'relpath':
767 767 pats = expandpats(pats or [])
768 768
769 769 def bad(f, msg):
770 770 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
771 771
772 772 if badfn is None:
773 773 badfn = bad
774 774
775 775 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
776 776 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
777 777
778 778 if m.always():
779 779 pats = []
780 780 return m, pats
781 781
782 782 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
783 783 badfn=None):
784 784 '''Return a matcher that will warn about bad matches.'''
785 785 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
786 786
787 787 def matchall(repo):
788 788 '''Return a matcher that will efficiently match everything.'''
789 789 return matchmod.always(repo.root, repo.getcwd())
790 790
791 791 def matchfiles(repo, files, badfn=None):
792 792 '''Return a matcher that will efficiently match exactly these files.'''
793 793 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
794 794
795 795 def parsefollowlinespattern(repo, rev, pat, msg):
796 796 """Return a file name from `pat` pattern suitable for usage in followlines
797 797 logic.
798 798 """
799 799 if not matchmod.patkind(pat):
800 800 return pathutil.canonpath(repo.root, repo.getcwd(), pat)
801 801 else:
802 802 ctx = repo[rev]
803 803 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
804 804 files = [f for f in ctx if m(f)]
805 805 if len(files) != 1:
806 806 raise error.ParseError(msg)
807 807 return files[0]
808 808
809 809 def origpath(ui, repo, filepath):
810 810 '''customize where .orig files are created
811 811
812 812 Fetch user defined path from config file: [ui] origbackuppath = <path>
813 813 Fall back to default (filepath with .orig suffix) if not specified
814 814 '''
815 815 origbackuppath = ui.config('ui', 'origbackuppath')
816 816 if not origbackuppath:
817 817 return filepath + ".orig"
818 818
819 819 # Convert filepath from an absolute path into a path inside the repo.
820 820 filepathfromroot = util.normpath(os.path.relpath(filepath,
821 821 start=repo.root))
822 822
823 823 origvfs = vfs.vfs(repo.wjoin(origbackuppath))
824 824 origbackupdir = origvfs.dirname(filepathfromroot)
825 825 if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
826 826 ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))
827 827
828 828 # Remove any files that conflict with the backup file's path
829 829 for f in reversed(list(util.finddirs(filepathfromroot))):
830 830 if origvfs.isfileorlink(f):
831 831 ui.note(_('removing conflicting file: %s\n')
832 832 % origvfs.join(f))
833 833 origvfs.unlink(f)
834 834 break
835 835
836 836 origvfs.makedirs(origbackupdir)
837 837
838 838 if origvfs.isdir(filepathfromroot) and not origvfs.islink(filepathfromroot):
839 839 ui.note(_('removing conflicting directory: %s\n')
840 840 % origvfs.join(filepathfromroot))
841 841 origvfs.rmtree(filepathfromroot, forcibly=True)
842 842
843 843 return origvfs.join(filepathfromroot)
844 844
845 845 class _containsnode(object):
846 846 """proxy __contains__(node) to container.__contains__ which accepts revs"""
847 847
848 848 def __init__(self, repo, revcontainer):
849 849 self._torev = repo.changelog.rev
850 850 self._revcontains = revcontainer.__contains__
851 851
852 852 def __contains__(self, node):
853 853 return self._revcontains(self._torev(node))
854 854
855 855 def cleanupnodes(repo, replacements, operation, moves=None, metadata=None,
856 856 fixphase=False, targetphase=None, backup=True):
857 857 """do common cleanups when old nodes are replaced by new nodes
858 858
859 859 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
860 860 (we might also want to move the working directory parent in the future)
861 861
862 862 By default, bookmark moves are calculated automatically from 'replacements',
863 863 but 'moves' can be used to override that. Also, 'moves' may include
864 864 additional bookmark moves that should not have associated obsmarkers.
865 865
866 866 replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
867 867 have replacements. operation is a string, like "rebase".
868 868
869 869 metadata is a dictionary containing metadata to be stored in the obsmarker if
870 870 obsolescence is enabled.
871 871 """
872 872 assert fixphase or targetphase is None
873 873 if not replacements and not moves:
874 874 return
875 875
876 876 # translate mapping's other forms
877 877 if not util.safehasattr(replacements, 'items'):
878 878 replacements = {n: () for n in replacements}
879 879
880 880 # Calculate bookmark movements
881 881 if moves is None:
882 882 moves = {}
883 883 # Unfiltered repo is needed since nodes in replacements might be hidden.
884 884 unfi = repo.unfiltered()
885 885 for oldnode, newnodes in replacements.items():
886 886 if oldnode in moves:
887 887 continue
888 888 if len(newnodes) > 1:
889 889 # usually a split, take the one with biggest rev number
890 890 newnode = next(unfi.set('max(%ln)', newnodes)).node()
891 891 elif len(newnodes) == 0:
892 892 # move bookmark backwards
893 893 roots = list(unfi.set('max((::%n) - %ln)', oldnode,
894 894 list(replacements)))
895 895 if roots:
896 896 newnode = roots[0].node()
897 897 else:
898 898 newnode = nullid
899 899 else:
900 900 newnode = newnodes[0]
901 901 moves[oldnode] = newnode
902 902
903 903 allnewnodes = [n for ns in replacements.values() for n in ns]
904 904 toretract = {}
905 905 toadvance = {}
906 906 if fixphase:
907 907 precursors = {}
908 908 for oldnode, newnodes in replacements.items():
909 909 for newnode in newnodes:
910 910 precursors.setdefault(newnode, []).append(oldnode)
911 911
912 912 allnewnodes.sort(key=lambda n: unfi[n].rev())
913 913 newphases = {}
914 914 def phase(ctx):
915 915 return newphases.get(ctx.node(), ctx.phase())
916 916 for newnode in allnewnodes:
917 917 ctx = unfi[newnode]
918 918 parentphase = max(phase(p) for p in ctx.parents())
919 919 if targetphase is None:
920 920 oldphase = max(unfi[oldnode].phase()
921 921 for oldnode in precursors[newnode])
922 922 newphase = max(oldphase, parentphase)
923 923 else:
924 924 newphase = max(targetphase, parentphase)
925 925 newphases[newnode] = newphase
926 926 if newphase > ctx.phase():
927 927 toretract.setdefault(newphase, []).append(newnode)
928 928 elif newphase < ctx.phase():
929 929 toadvance.setdefault(newphase, []).append(newnode)
930 930
931 931 with repo.transaction('cleanup') as tr:
932 932 # Move bookmarks
933 933 bmarks = repo._bookmarks
934 934 bmarkchanges = []
935 935 for oldnode, newnode in moves.items():
936 936 oldbmarks = repo.nodebookmarks(oldnode)
937 937 if not oldbmarks:
938 938 continue
939 939 from . import bookmarks # avoid import cycle
940 940 repo.ui.debug('moving bookmarks %r from %s to %s\n' %
941 941 (pycompat.rapply(pycompat.maybebytestr, oldbmarks),
942 942 hex(oldnode), hex(newnode)))
943 943 # Delete divergent bookmarks being parents of related newnodes
944 944 deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
945 945 allnewnodes, newnode, oldnode)
946 946 deletenodes = _containsnode(repo, deleterevs)
947 947 for name in oldbmarks:
948 948 bmarkchanges.append((name, newnode))
949 949 for b in bookmarks.divergent2delete(repo, deletenodes, name):
950 950 bmarkchanges.append((b, None))
951 951
952 952 if bmarkchanges:
953 953 bmarks.applychanges(repo, tr, bmarkchanges)
954 954
955 955 for phase, nodes in toretract.items():
956 956 phases.retractboundary(repo, tr, phase, nodes)
957 957 for phase, nodes in toadvance.items():
958 958 phases.advanceboundary(repo, tr, phase, nodes)
959 959
960 960 # Obsolete or strip nodes
961 961 if obsolete.isenabled(repo, obsolete.createmarkersopt):
962 962 # If a node is already obsoleted, and we want to obsolete it
963 963 # without a successor, skip that obsolete request since it's
964 964 # unnecessary. That's the "if s or not isobs(n)" check below.
965 965 # Also sort the nodes in topological order; that might be useful for
966 966 # some obsstore logic.
967 967 # NOTE: the filtering and sorting might belong to createmarkers.
968 968 isobs = unfi.obsstore.successors.__contains__
969 969 torev = unfi.changelog.rev
970 970 sortfunc = lambda ns: torev(ns[0])
971 rels = [(unfi[n], tuple(unfi[m] for m in s))
972 for n, s in sorted(replacements.items(), key=sortfunc)
973 if s or not isobs(n)]
971 rels = []
972 for n, s in sorted(replacements.items(), key=sortfunc):
973 if s or not isobs(n):
974 rel = (unfi[n], tuple(unfi[m] for m in s))
975 rels.append(rel)
974 976 if rels:
975 977 obsolete.createmarkers(repo, rels, operation=operation,
976 978 metadata=metadata)
977 979 else:
978 980 from . import repair # avoid import cycle
979 981 tostrip = list(replacements)
980 982 if tostrip:
981 983 repair.delayedstrip(repo.ui, repo, tostrip, operation,
982 984 backup=backup)
983 985
984 986 def addremove(repo, matcher, prefix, opts=None):
985 987 if opts is None:
986 988 opts = {}
987 989 m = matcher
988 990 dry_run = opts.get('dry_run')
989 991 try:
990 992 similarity = float(opts.get('similarity') or 0)
991 993 except ValueError:
992 994 raise error.Abort(_('similarity must be a number'))
993 995 if similarity < 0 or similarity > 100:
994 996 raise error.Abort(_('similarity must be between 0 and 100'))
995 997 similarity /= 100.0
996 998
997 999 ret = 0
998 1000 join = lambda f: os.path.join(prefix, f)
999 1001
1000 1002 wctx = repo[None]
1001 1003 for subpath in sorted(wctx.substate):
1002 1004 submatch = matchmod.subdirmatcher(subpath, m)
1003 1005 if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
1004 1006 sub = wctx.sub(subpath)
1005 1007 try:
1006 1008 if sub.addremove(submatch, prefix, opts):
1007 1009 ret = 1
1008 1010 except error.LookupError:
1009 1011 repo.ui.status(_("skipping missing subrepository: %s\n")
1010 1012 % join(subpath))
1011 1013
1012 1014 rejected = []
1013 1015 def badfn(f, msg):
1014 1016 if f in m.files():
1015 1017 m.bad(f, msg)
1016 1018 rejected.append(f)
1017 1019
1018 1020 badmatch = matchmod.badmatch(m, badfn)
1019 1021 added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
1020 1022 badmatch)
1021 1023
1022 1024 unknownset = set(unknown + forgotten)
1023 1025 toprint = unknownset.copy()
1024 1026 toprint.update(deleted)
1025 1027 for abs in sorted(toprint):
1026 1028 if repo.ui.verbose or not m.exact(abs):
1027 1029 if abs in unknownset:
1028 1030 status = _('adding %s\n') % m.uipath(abs)
1029 1031 label = 'addremove.added'
1030 1032 else:
1031 1033 status = _('removing %s\n') % m.uipath(abs)
1032 1034 label = 'addremove.removed'
1033 1035 repo.ui.status(status, label=label)
1034 1036
1035 1037 renames = _findrenames(repo, m, added + unknown, removed + deleted,
1036 1038 similarity)
1037 1039
1038 1040 if not dry_run:
1039 1041 _markchanges(repo, unknown + forgotten, deleted, renames)
1040 1042
1041 1043 for f in rejected:
1042 1044 if f in m.files():
1043 1045 return 1
1044 1046 return ret
1045 1047
1046 1048 def marktouched(repo, files, similarity=0.0):
1047 1049 '''Assert that files have somehow been operated upon. The files are relative to
1048 1050 the repo root.'''
1049 1051 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
1050 1052 rejected = []
1051 1053
1052 1054 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
1053 1055
1054 1056 if repo.ui.verbose:
1055 1057 unknownset = set(unknown + forgotten)
1056 1058 toprint = unknownset.copy()
1057 1059 toprint.update(deleted)
1058 1060 for abs in sorted(toprint):
1059 1061 if abs in unknownset:
1060 1062 status = _('adding %s\n') % abs
1061 1063 else:
1062 1064 status = _('removing %s\n') % abs
1063 1065 repo.ui.status(status)
1064 1066
1065 1067 renames = _findrenames(repo, m, added + unknown, removed + deleted,
1066 1068 similarity)
1067 1069
1068 1070 _markchanges(repo, unknown + forgotten, deleted, renames)
1069 1071
1070 1072 for f in rejected:
1071 1073 if f in m.files():
1072 1074 return 1
1073 1075 return 0
1074 1076
1075 1077 def _interestingfiles(repo, matcher):
1076 1078 '''Walk dirstate with matcher, looking for files that addremove would care
1077 1079 about.
1078 1080
1079 1081 This is different from dirstate.status because it doesn't care about
1080 1082 whether files are modified or clean.'''
1081 1083 added, unknown, deleted, removed, forgotten = [], [], [], [], []
1082 1084 audit_path = pathutil.pathauditor(repo.root, cached=True)
1083 1085
1084 1086 ctx = repo[None]
1085 1087 dirstate = repo.dirstate
1086 1088 walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
1087 1089 unknown=True, ignored=False, full=False)
1088 1090 for abs, st in walkresults.iteritems():
1089 1091 dstate = dirstate[abs]
1090 1092 if dstate == '?' and audit_path.check(abs):
1091 1093 unknown.append(abs)
1092 1094 elif dstate != 'r' and not st:
1093 1095 deleted.append(abs)
1094 1096 elif dstate == 'r' and st:
1095 1097 forgotten.append(abs)
1096 1098 # for finding renames
1097 1099 elif dstate == 'r' and not st:
1098 1100 removed.append(abs)
1099 1101 elif dstate == 'a':
1100 1102 added.append(abs)
1101 1103
1102 1104 return added, unknown, deleted, removed, forgotten
1103 1105
1104 1106 def _findrenames(repo, matcher, added, removed, similarity):
1105 1107 '''Find renames from removed files to added ones.'''
1106 1108 renames = {}
1107 1109 if similarity > 0:
1108 1110 for old, new, score in similar.findrenames(repo, added, removed,
1109 1111 similarity):
1110 1112 if (repo.ui.verbose or not matcher.exact(old)
1111 1113 or not matcher.exact(new)):
1112 1114 repo.ui.status(_('recording removal of %s as rename to %s '
1113 1115 '(%d%% similar)\n') %
1114 1116 (matcher.rel(old), matcher.rel(new),
1115 1117 score * 100))
1116 1118 renames[new] = old
1117 1119 return renames
1118 1120
1119 1121 def _markchanges(repo, unknown, deleted, renames):
1120 1122 '''Marks the files in unknown as added, the files in deleted as removed,
1121 1123 and the files in renames as copied.'''
1122 1124 wctx = repo[None]
1123 1125 with repo.wlock():
1124 1126 wctx.forget(deleted)
1125 1127 wctx.add(unknown)
1126 1128 for new, old in renames.iteritems():
1127 1129 wctx.copy(old, new)
1128 1130
1129 1131 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1130 1132 """Update the dirstate to reflect the intent of copying src to dst. For
1131 1133 various reasons it might not end up with dst being marked as copied from src.
1132 1134 """
1133 1135 origsrc = repo.dirstate.copied(src) or src
1134 1136 if dst == origsrc: # copying back a copy?
1135 1137 if repo.dirstate[dst] not in 'mn' and not dryrun:
1136 1138 repo.dirstate.normallookup(dst)
1137 1139 else:
1138 1140 if repo.dirstate[origsrc] == 'a' and origsrc == src:
1139 1141 if not ui.quiet:
1140 1142 ui.warn(_("%s has not been committed yet, so no copy "
1141 1143 "data will be stored for %s.\n")
1142 1144 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
1143 1145 if repo.dirstate[dst] in '?r' and not dryrun:
1144 1146 wctx.add([dst])
1145 1147 elif not dryrun:
1146 1148 wctx.copy(origsrc, dst)
1147 1149
1148 1150 def writerequires(opener, requirements):
1149 1151 with opener('requires', 'w') as fp:
1150 1152 for r in sorted(requirements):
1151 1153 fp.write("%s\n" % r)
1152 1154
1153 1155 class filecachesubentry(object):
1154 1156 def __init__(self, path, stat):
1155 1157 self.path = path
1156 1158 self.cachestat = None
1157 1159 self._cacheable = None
1158 1160
1159 1161 if stat:
1160 1162 self.cachestat = filecachesubentry.stat(self.path)
1161 1163
1162 1164 if self.cachestat:
1163 1165 self._cacheable = self.cachestat.cacheable()
1164 1166 else:
1165 1167 # None means we don't know yet
1166 1168 self._cacheable = None
1167 1169
1168 1170 def refresh(self):
1169 1171 if self.cacheable():
1170 1172 self.cachestat = filecachesubentry.stat(self.path)
1171 1173
1172 1174 def cacheable(self):
1173 1175 if self._cacheable is not None:
1174 1176 return self._cacheable
1175 1177
1176 1178 # we don't know yet, assume it is for now
1177 1179 return True
1178 1180
1179 1181 def changed(self):
1180 1182 # no point in going further if we can't cache it
1181 1183 if not self.cacheable():
1182 1184 return True
1183 1185
1184 1186 newstat = filecachesubentry.stat(self.path)
1185 1187
1186 1188 # we may not know if it's cacheable yet, check again now
1187 1189 if newstat and self._cacheable is None:
1188 1190 self._cacheable = newstat.cacheable()
1189 1191
1190 1192 # check again
1191 1193 if not self._cacheable:
1192 1194 return True
1193 1195
1194 1196 if self.cachestat != newstat:
1195 1197 self.cachestat = newstat
1196 1198 return True
1197 1199 else:
1198 1200 return False
1199 1201
1200 1202 @staticmethod
1201 1203 def stat(path):
1202 1204 try:
1203 1205 return util.cachestat(path)
1204 1206 except OSError as e:
1205 1207 if e.errno != errno.ENOENT:
1206 1208 raise
1207 1209
1208 1210 class filecacheentry(object):
1209 1211 def __init__(self, paths, stat=True):
1210 1212 self._entries = []
1211 1213 for path in paths:
1212 1214 self._entries.append(filecachesubentry(path, stat))
1213 1215
1214 1216 def changed(self):
1215 1217 '''true if any entry has changed'''
1216 1218 for entry in self._entries:
1217 1219 if entry.changed():
1218 1220 return True
1219 1221 return False
1220 1222
1221 1223 def refresh(self):
1222 1224 for entry in self._entries:
1223 1225 entry.refresh()
1224 1226
1225 1227 class filecache(object):
1226 1228 """A property like decorator that tracks files under .hg/ for updates.
1227 1229
1228 1230 On first access, the files defined as arguments are stat()ed and the
1229 1231 results cached. The decorated function is called. The results are stashed
1230 1232 away in a ``_filecache`` dict on the object whose method is decorated.
1231 1233
1232 1234 On subsequent access, the cached result is returned.
1233 1235
1234 1236 On external property set operations, stat() calls are performed and the new
1235 1237 value is cached.
1236 1238
1237 1239 On property delete operations, cached data is removed.
1238 1240
1239 1241 When using the property API, cached data is always returned, if available:
1240 1242 no stat() is performed to check if the file has changed and if the function
1241 1243 needs to be called to reflect file changes.
1242 1244
1243 1245 Others can muck about with the state of the ``_filecache`` dict. e.g. they
1244 1246 can populate an entry before the property's getter is called. In this case,
1245 1247 entries in ``_filecache`` will be used during property operations,
1246 1248 if available. If the underlying file changes, it is up to external callers
1247 1249 to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
1248 1250 method result as well as possibly calling ``del obj._filecache[attr]`` to
1249 1251 remove the ``filecacheentry``.
1250 1252 """
1251 1253
1252 1254 def __init__(self, *paths):
1253 1255 self.paths = paths
1254 1256
1255 1257 def join(self, obj, fname):
1256 1258 """Used to compute the runtime path of a cached file.
1257 1259
1258 1260 Users should subclass filecache and provide their own version of this
1259 1261 function to call the appropriate join function on 'obj' (an instance
1260 1262 of the class that its member function was decorated).
1261 1263 """
1262 1264 raise NotImplementedError
1263 1265
1264 1266 def __call__(self, func):
1265 1267 self.func = func
1266 1268 self.sname = func.__name__
1267 1269 self.name = pycompat.sysbytes(self.sname)
1268 1270 return self
1269 1271
1270 1272 def __get__(self, obj, type=None):
1271 1273 # if accessed on the class, return the descriptor itself.
1272 1274 if obj is None:
1273 1275 return self
1274 1276 # do we need to check if the file changed?
1275 1277 if self.sname in obj.__dict__:
1276 1278 assert self.name in obj._filecache, self.name
1277 1279 return obj.__dict__[self.sname]
1278 1280
1279 1281 entry = obj._filecache.get(self.name)
1280 1282
1281 1283 if entry:
1282 1284 if entry.changed():
1283 1285 entry.obj = self.func(obj)
1284 1286 else:
1285 1287 paths = [self.join(obj, path) for path in self.paths]
1286 1288
1287 1289 # We stat -before- creating the object so our cache doesn't lie if
1288 1290 # a writer modified the file between the time we read and stat it
1289 1291 entry = filecacheentry(paths, True)
1290 1292 entry.obj = self.func(obj)
1291 1293
1292 1294 obj._filecache[self.name] = entry
1293 1295
1294 1296 obj.__dict__[self.sname] = entry.obj
1295 1297 return entry.obj
1296 1298
1297 1299 def __set__(self, obj, value):
1298 1300 if self.name not in obj._filecache:
1299 1301 # we add an entry for the missing value because X in __dict__
1300 1302 # implies X in _filecache
1301 1303 paths = [self.join(obj, path) for path in self.paths]
1302 1304 ce = filecacheentry(paths, False)
1303 1305 obj._filecache[self.name] = ce
1304 1306 else:
1305 1307 ce = obj._filecache[self.name]
1306 1308
1307 1309 ce.obj = value # update cached copy
1308 1310 obj.__dict__[self.sname] = value # update copy returned by obj.x
1309 1311
1310 1312 def __delete__(self, obj):
1311 1313 try:
1312 1314 del obj.__dict__[self.sname]
1313 1315 except KeyError:
1314 1316 raise AttributeError(self.sname)
1315 1317
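# Illustrative aside (not part of this changeset): the essence of the
# filecache descriptor above is "recompute only when the backing file's
# stat changes". A minimal standalone sketch for a single fixed file
# (class name and cache attribute are hypothetical):

import os

class statcached(object):
    '''Cache func(obj) until the file at path changes on disk.'''
    def __init__(self, path):
        self.path = path

    def __call__(self, func):
        self.func = func
        return self

    def __get__(self, obj, type=None):
        if obj is None:
            return self
        try:
            st = os.stat(self.path)
            key = (st.st_mtime, st.st_size)
        except OSError:
            key = None
        cached = getattr(obj, '_statcache', None)
        if cached is None or cached[0] != key:
            # stat key changed (or first access): recompute and stash
            obj._statcache = (key, self.func(obj))
        return obj._statcache[1]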
1316 1318 def extdatasource(repo, source):
1317 1319 """Gather a map of rev -> value dict from the specified source
1318 1320
1319 1321 A source spec is treated as a URL, with a special case shell: type
1320 1322 for parsing the output from a shell command.
1321 1323
1322 1324 The data is parsed as a series of newline-separated records where
1323 1325 each record is a revision specifier optionally followed by a space
1324 1326 and a freeform string value. If the revision is known locally, it
1325 1327 is converted to a rev, otherwise the record is skipped.
1326 1328
1327 1329 Note that both key and value are treated as UTF-8 and converted to
1328 1330 the local encoding. This allows uniformity between local and
1329 1331 remote data sources.
1330 1332 """
1331 1333
1332 1334 spec = repo.ui.config("extdata", source)
1333 1335 if not spec:
1334 1336 raise error.Abort(_("unknown extdata source '%s'") % source)
1335 1337
1336 1338 data = {}
1337 1339 src = proc = None
1338 1340 try:
1339 1341 if spec.startswith("shell:"):
1340 1342 # external commands should be run relative to the repo root
1341 1343 cmd = spec[6:]
1342 1344 proc = subprocess.Popen(procutil.tonativestr(cmd),
1343 1345 shell=True, bufsize=-1,
1344 1346 close_fds=procutil.closefds,
1345 1347 stdout=subprocess.PIPE,
1346 1348 cwd=procutil.tonativestr(repo.root))
1347 1349 src = proc.stdout
1348 1350 else:
1349 1351 # treat as a URL or file
1350 1352 src = url.open(repo.ui, spec)
1351 1353 for l in src:
1352 1354 if " " in l:
1353 1355 k, v = l.strip().split(" ", 1)
1354 1356 else:
1355 1357 k, v = l.strip(), ""
1356 1358
1357 1359 k = encoding.tolocal(k)
1358 1360 try:
1359 1361 data[revsingle(repo, k).rev()] = encoding.tolocal(v)
1360 1362 except (error.LookupError, error.RepoLookupError):
1361 1363 pass # we ignore data for nodes that don't exist locally
1362 1364 finally:
1363 1365 if proc:
1364 1366 proc.communicate()
1365 1367 if src:
1366 1368 src.close()
1367 1369 if proc and proc.returncode != 0:
1368 1370 raise error.Abort(_("extdata command '%s' failed: %s")
1369 1371 % (cmd, procutil.explainexit(proc.returncode)))
1370 1372
1371 1373 return data
1372 1374
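# Illustrative aside (not part of this changeset): the record format read
# by extdatasource() above is one "key[ value]" pair per line. A standalone
# parser for that format, skipping the revision resolution step:

def parserecords_sketch(lines):
    data = {}
    for l in lines:
        if ' ' in l:
            k, v = l.strip().split(' ', 1)
        else:
            k, v = l.strip(), ''
        data[k] = v
    return data

# parserecords_sketch(['abc123 fixed\n', 'def456\n'])
# == {'abc123': 'fixed', 'def456': ''}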
1373 1375 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1374 1376 if lock is None:
1375 1377 raise error.LockInheritanceContractViolation(
1376 1378 'lock can only be inherited while held')
1377 1379 if environ is None:
1378 1380 environ = {}
1379 1381 with lock.inherit() as locker:
1380 1382 environ[envvar] = locker
1381 1383 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1382 1384
1383 1385 def wlocksub(repo, cmd, *args, **kwargs):
1384 1386 """run cmd as a subprocess that allows inheriting repo's wlock
1385 1387
1386 1388 This can only be called while the wlock is held. This takes all the
1387 1389 arguments that ui.system does, and returns the exit code of the
1388 1390 subprocess."""
1389 1391 return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
1390 1392 **kwargs)
1391 1393
1392 1394 class progress(object):
1393 1395 def __init__(self, ui, topic, unit="", total=None):
1394 1396 self.ui = ui
1395 1397 self.pos = 0
1396 1398 self.topic = topic
1397 1399 self.unit = unit
1398 1400 self.total = total
1399 1401
1400 1402 def __enter__(self):
1401 1403 return self
1402 1404
1403 1405 def __exit__(self, exc_type, exc_value, exc_tb):
1404 1406 self.complete()
1405 1407
1406 1408 def update(self, pos, item="", total=None):
1407 1409 assert pos is not None
1408 1410 if total:
1409 1411 self.total = total
1410 1412 self.pos = pos
1411 1413 self._print(item)
1412 1414
1413 1415 def increment(self, step=1, item="", total=None):
1414 1416 self.update(self.pos + step, item, total)
1415 1417
1416 1418 def complete(self):
1417 1419 self.ui.progress(self.topic, None)
1418 1420
1419 1421 def _print(self, item):
1420 1422 self.ui.progress(self.topic, self.pos, item, self.unit,
1421 1423 self.total)
1422 1424
1423 1425 def gdinitconfig(ui):
1424 1426 """helper function to know if a repo should be created as general delta
1425 1427 """
1426 1428 # experimental config: format.generaldelta
1427 1429 return (ui.configbool('format', 'generaldelta')
1428 1430 or ui.configbool('format', 'usegeneraldelta')
1429 1431 or ui.configbool('format', 'sparse-revlog'))
1430 1432
1431 1433 def gddeltaconfig(ui):
1432 1434 """helper function to know if incoming delta should be optimised
1433 1435 """
1434 1436 # experimental config: format.generaldelta
1435 1437 return ui.configbool('format', 'generaldelta')
1436 1438
1437 1439 class simplekeyvaluefile(object):
1438 1440 """A simple file with key=value lines
1439 1441
1440 1442 Keys must be alphanumeric and start with a letter; values must not
1441 1443 contain '\n' characters"""
1442 1444 firstlinekey = '__firstline'
1443 1445
1444 1446 def __init__(self, vfs, path, keys=None):
1445 1447 self.vfs = vfs
1446 1448 self.path = path
1447 1449
1448 1450 def read(self, firstlinenonkeyval=False):
1449 1451 """Read the contents of a simple key-value file
1450 1452
1451 1453 'firstlinenonkeyval' indicates whether the first line of the file should
1452 1454 be treated as a key-value pair or returned fully under the
1453 1455 __firstline key."""
1454 1456 lines = self.vfs.readlines(self.path)
1455 1457 d = {}
1456 1458 if firstlinenonkeyval:
1457 1459 if not lines:
1458 1460 e = _("empty simplekeyvalue file")
1459 1461 raise error.CorruptedState(e)
1460 1462 # we don't want to include '\n' in the __firstline
1461 1463 d[self.firstlinekey] = lines[0][:-1]
1462 1464 del lines[0]
1463 1465
1464 1466 try:
1465 1467 # the 'if line.strip()' part prevents us from failing on empty
1466 1468 # lines which only contain '\n' and therefore are not skipped
1467 1469 # by 'if line'
1468 1470 updatedict = dict(line[:-1].split('=', 1) for line in lines
1469 1471 if line.strip())
1470 1472 if self.firstlinekey in updatedict:
1471 1473 e = _("%r can't be used as a key")
1472 1474 raise error.CorruptedState(e % self.firstlinekey)
1473 1475 d.update(updatedict)
1474 1476 except ValueError as e:
1475 1477 raise error.CorruptedState(str(e))
1476 1478 return d
1477 1479
1478 1480 def write(self, data, firstline=None):
1479 1481 """Write key=>value mapping to a file
1480 1482 data is a dict. Keys must be alphanumerical and start with a letter.
1481 1483 Values must not contain newline characters.
1482 1484
1483 1485 If 'firstline' is not None, it is written to file before
1484 1486 everything else, as it is, not in a key=value form"""
1485 1487 lines = []
1486 1488 if firstline is not None:
1487 1489 lines.append('%s\n' % firstline)
1488 1490
1489 1491 for k, v in data.items():
1490 1492 if k == self.firstlinekey:
1491 1493 e = "key name '%s' is reserved" % self.firstlinekey
1492 1494 raise error.ProgrammingError(e)
1493 1495 if not k[0:1].isalpha():
1494 1496 e = "keys must start with a letter in a key-value file"
1495 1497 raise error.ProgrammingError(e)
1496 1498 if not k.isalnum():
1497 1499 e = "invalid key name in a simple key-value file"
1498 1500 raise error.ProgrammingError(e)
1499 1501 if '\n' in v:
1500 1502 e = "invalid value in a simple key-value file"
1501 1503 raise error.ProgrammingError(e)
1502 1504 lines.append("%s=%s\n" % (k, v))
1503 1505 with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
1504 1506 fp.write(''.join(lines))
1505 1507
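# Illustrative aside (not part of this changeset): the key=value format of
# simplekeyvaluefile above, as standalone parse/serialize helpers over
# plain strings (firstline handling omitted; helper names are hypothetical):

def parsekeyvalue_sketch(text):
    return dict(line.split('=', 1) for line in text.splitlines()
                if line.strip())

def writekeyvalue_sketch(data):
    for k in data:
        assert k[:1].isalpha() and k.isalnum(), 'bad key: %r' % k
        assert '\n' not in data[k], 'bad value: %r' % data[k]
    return ''.join('%s=%s\n' % (k, v) for k, v in sorted(data.items()))

# round trip: parsekeyvalue_sketch(writekeyvalue_sketch({'a': '1'})) == {'a': '1'}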
1506 1508 _reportobsoletedsource = [
1507 1509 'debugobsolete',
1508 1510 'pull',
1509 1511 'push',
1510 1512 'serve',
1511 1513 'unbundle',
1512 1514 ]
1513 1515
1514 1516 _reportnewcssource = [
1515 1517 'pull',
1516 1518 'unbundle',
1517 1519 ]
1518 1520
1519 1521 def prefetchfiles(repo, revs, match):
1520 1522 """Invokes the registered file prefetch functions, allowing extensions to
1521 1523 ensure the corresponding files are available locally, before the command
1522 1524 uses them."""
1523 1525 if match:
1524 1526 # The command itself will complain about files that don't exist, so
1525 1527 # don't duplicate the message.
1526 1528 match = matchmod.badmatch(match, lambda fn, msg: None)
1527 1529 else:
1528 1530 match = matchall(repo)
1529 1531
1530 1532 fileprefetchhooks(repo, revs, match)
1531 1533
1532 1534 # a list of (repo, revs, match) prefetch functions
1533 1535 fileprefetchhooks = util.hooks()

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True

def registersummarycallback(repo, otr, txnname=''):
    """register a callback to issue a summary after the transaction is closed
    """
    def txmatch(sources):
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used, leading to trouble. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())
        def wrapped(tr):
            repo = reporef()
            if filtername:
                repo = repo.filtered(filtername)
            func(repo, tr)
        newcat = '%02i-txnreport' % len(categories)
        otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    if txmatch(_reportobsoletedsource):
        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            if obsoleted:
                repo.ui.status(_('obsoleted %i changesets\n')
                               % len(obsoleted))

    if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
        repo.ui.configbool('experimental', 'evolution.report-instabilities')):
        instabilitytypes = [
            ('orphan', 'orphan'),
            ('phase-divergent', 'phasedivergent'),
            ('content-divergent', 'contentdivergent'),
        ]

        def getinstabilitycounts(repo):
            filtered = repo.changelog.filteredrevs
            counts = {}
            for instability, revset in instabilitytypes:
                counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
                                          filtered)
            return counts

        oldinstabilitycounts = getinstabilitycounts(repo)
        @reportsummary
        def reportnewinstabilities(repo, tr):
            newinstabilitycounts = getinstabilitycounts(repo)
            for instability, revset in instabilitytypes:
                delta = (newinstabilitycounts[instability] -
                         oldinstabilitycounts[instability])
                msg = getinstabilitymessage(delta, instability)
                if msg:
                    repo.ui.warn(msg)

    if txmatch(_reportnewcssource):
        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get('origrepolen', len(repo))
            if origrepolen >= len(repo):
                return

            # Compute the bounds of new revisions' range, excluding obsoletes.
            unfi = repo.unfiltered()
            revs = unfi.revs('%d: and not obsolete()', origrepolen)
            if not revs:
                # Got only obsoletes.
                return
            minrev, maxrev = repo[revs.min()], repo[revs.max()]

            if minrev == maxrev:
                revrange = minrev
            else:
                revrange = '%s:%s' % (minrev, maxrev)
            draft = len(repo.revs('%ld and draft()', revs))
            secret = len(repo.revs('%ld and secret()', revs))
            if not (draft or secret):
                msg = _('new changesets %s\n') % revrange
            elif draft and secret:
                msg = _('new changesets %s (%d drafts, %d secrets)\n')
                msg %= (revrange, draft, secret)
            elif draft:
                msg = _('new changesets %s (%d drafts)\n')
                msg %= (revrange, draft)
            elif secret:
                msg = _('new changesets %s (%d secrets)\n')
                msg %= (revrange, secret)
            else:
                raise error.ProgrammingError('entered unreachable condition')
            repo.ui.status(msg)
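            # Illustrative output (hashes hypothetical): pulling three
            # revisions, two of them draft, would print something like
            #   new changesets 26f254b93a7d:7e7d56fe4833 (2 drafts)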

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            pull/unbundle.
            """
            origrepolen = tr.changes.get('origrepolen', len(repo))
            phasetracking = tr.changes.get('phases', {})
            if not phasetracking:
                return
            published = [
                rev for rev, (old, new) in phasetracking.iteritems()
                if new == phases.public and rev < origrepolen
            ]
            if not published:
                return
            repo.ui.status(_('%d local changesets published\n')
                           % len(published))

def getinstabilitymessage(delta, instability):
    """Return the message warning about new instabilities, or None.

    Exists as a separate function so that extensions can wrap it to show
    more information, like how to fix instabilities."""
    if delta > 0:
        return _('%i new %s changesets\n') % (delta, instability)
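# For instance, getinstabilitymessage(2, 'orphan') returns
# '2 new orphan changesets\n', while a non-positive delta returns None.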

def nodesummaries(repo, nodes, maxnumnodes=4):
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return ' '.join(short(h) for h in nodes)
    first = ' '.join(short(h) for h in nodes[:maxnumnodes])
    return _("%s and %d others") % (first, len(nodes) - maxnumnodes)
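# Illustrative behaviour (hashes hypothetical): six nodes with the default
# maxnumnodes=4 and a non-verbose ui summarize to something like
#   'd047485b3896 0902e6615814 6223fbcc6b6a e602ba25b4e7 and 2 others'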

def enforcesinglehead(repo, tr, desc):
    """check that no named branch has multiple heads"""
    if desc in ('strip', 'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered('visible')
    # possible improvement: we could restrict the check to the affected
    # branches
    for name, heads in visible.branchmap().iteritems():
        if len(heads) > 1:
            msg = _('rejecting multiple heads on branch "%s"')
            msg %= name
            hint = _('%d heads: %s')
            hint %= (len(heads), nodesummaries(repo, heads))
            raise error.Abort(msg, hint=hint)
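# Hedged usage note: this check is typically enabled through configuration
# (assumed here to be the experimental single-head-per-branch option), e.g.:
#
#   [experimental]
#   single-head-per-branch = yes
#
# A transaction that would leave two heads on a branch then aborts with
# 'rejecting multiple heads on branch "..."'.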

def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.
    """
    return sink

def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    if not repo.filtername or not repo.ui.configbool('experimental',
                                                     'directaccess'):
        return repo

    if repo.filtername not in ('visible', 'visible-hidden'):
        return repo

    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError:  # will be reported by scmutil.revrange()
            continue

        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)

    if not revs:
        return repo

    if hiddentype == 'warn':
        unfi = repo.unfiltered()
        revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
        repo.ui.warn(_("warning: accessing hidden changesets for write "
                       "operation: %s\n") % revstr)

    # we have to use a new filtername to separate branch/tags caches until we
    # can disable these caches when revisions are dynamically pinned.
    return repo.filtered('visible-hidden', revs)
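# Hedged example: with the experimental option named in the code above,
#
#   [experimental]
#   directaccess = yes
#
# a command addressing a hidden changeset by hash (hash hypothetical), e.g.
#   hg update 26f254b93a7d
# operates on a repo view with that changeset unhidden, emitting the warning
# above when hiddentype is 'warn'.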

def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and return the set of revision numbers of the
    hidden changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
    for s in symbols:
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    if n not in cl:
                        revs.add(n)
                    continue
        except ValueError:
            pass

        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs

def bookmarkrevs(repo, mark):
    """
    Select revisions reachable by a given bookmark
    """
    return repo.revs("ancestors(bookmark(%s)) - "
                     "ancestors(head() and not bookmark(%s)) - "
                     "ancestors(bookmark() and not bookmark(%s))",
                     mark, mark, mark)
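
# The revset keeps only history reachable exclusively through the bookmark:
# ancestors of the bookmark, minus anything also reachable from another head
# or another bookmark. A hedged command-line sketch (bookmark name
# hypothetical) of the same selection:
#
#   hg log -r "ancestors(bookmark(feature))
#              - ancestors(head() and not bookmark(feature))
#              - ancestors(bookmark() and not bookmark(feature))"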