context: handle partial nodeids in revsymbol()...
Martin von Zweigbergk
r37769:35b34202 default
@@ -1,1538 +1,1543 @@
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import hashlib
13 13 import os
14 14 import re
15 15 import socket
16 16 import subprocess
17 17 import weakref
18 18
19 19 from .i18n import _
20 20 from .node import (
21 21 bin,
22 22 hex,
23 23 nullid,
24 24 short,
25 25 wdirid,
26 26 wdirrev,
27 27 )
28 28
29 29 from . import (
30 30 encoding,
31 31 error,
32 32 match as matchmod,
33 33 obsolete,
34 34 obsutil,
35 35 pathutil,
36 36 phases,
37 37 pycompat,
38 38 revsetlang,
39 39 similar,
40 40 url,
41 41 util,
42 42 vfs,
43 43 )
44 44
45 45 from .utils import (
46 46 procutil,
47 47 stringutil,
48 48 )
49 49
50 50 if pycompat.iswindows:
51 51 from . import scmwindows as scmplatform
52 52 else:
53 53 from . import scmposix as scmplatform
54 54
55 55 termsize = scmplatform.termsize
56 56
57 57 class status(tuple):
58 58 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
59 59 and 'ignored' properties are only relevant to the working copy.
60 60 '''
61 61
62 62 __slots__ = ()
63 63
64 64 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
65 65 clean):
66 66 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
67 67 ignored, clean))
68 68
69 69 @property
70 70 def modified(self):
71 71 '''files that have been modified'''
72 72 return self[0]
73 73
74 74 @property
75 75 def added(self):
76 76 '''files that have been added'''
77 77 return self[1]
78 78
79 79 @property
80 80 def removed(self):
81 81 '''files that have been removed'''
82 82 return self[2]
83 83
84 84 @property
85 85 def deleted(self):
86 86 '''files that are in the dirstate, but have been deleted from the
87 87 working copy (aka "missing")
88 88 '''
89 89 return self[3]
90 90
91 91 @property
92 92 def unknown(self):
93 93 '''files not in the dirstate that are not ignored'''
94 94 return self[4]
95 95
96 96 @property
97 97 def ignored(self):
98 98 '''files not in the dirstate that are ignored (by _dirignore())'''
99 99 return self[5]
100 100
101 101 @property
102 102 def clean(self):
103 103 '''files that have not been modified'''
104 104 return self[6]
105 105
106 106 def __repr__(self, *args, **kwargs):
107 107 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
108 108 'unknown=%r, ignored=%r, clean=%r>') % self)
109 109
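A minimal sketch of how the seven positional fields map onto the named properties (the file names are made up):

    from mercurial import scmutil

    st = scmutil.status([b'a.txt'], [], [], [], [], [], [b'b.txt'])
    assert st.modified == [b'a.txt']   # field 0
    assert st.clean == [b'b.txt']      # field 6
    m, a, r, d, u, i, c = st           # still unpacks like a plain 7-tuple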
110 110 def itersubrepos(ctx1, ctx2):
111 111 """find subrepos in ctx1 or ctx2"""
112 112 # Create a (subpath, ctx) mapping where we prefer subpaths from
113 113 # ctx1. The subpaths from ctx2 are important when the .hgsub file
114 114 # has been modified (in ctx2) but not yet committed (in ctx1).
115 115 subpaths = dict.fromkeys(ctx2.substate, ctx2)
116 116 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
117 117
118 118 missing = set()
119 119
120 120 for subpath in ctx2.substate:
121 121 if subpath not in ctx1.substate:
122 122 del subpaths[subpath]
123 123 missing.add(subpath)
124 124
125 125 for subpath, ctx in sorted(subpaths.iteritems()):
126 126 yield subpath, ctx.sub(subpath)
127 127
128 128 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
129 129 # status and diff will have an accurate result when it does
130 130 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
131 131 # against itself.
132 132 for subpath in missing:
133 133 yield subpath, ctx2.nullsub(subpath, ctx1)
134 134
135 135 def nochangesfound(ui, repo, excluded=None):
136 136 '''Report no changes for push/pull, excluded is None or a list of
137 137 nodes excluded from the push/pull.
138 138 '''
139 139 secretlist = []
140 140 if excluded:
141 141 for n in excluded:
142 142 ctx = repo[n]
143 143 if ctx.phase() >= phases.secret and not ctx.extinct():
144 144 secretlist.append(n)
145 145
146 146 if secretlist:
147 147 ui.status(_("no changes found (ignored %d secret changesets)\n")
148 148 % len(secretlist))
149 149 else:
150 150 ui.status(_("no changes found\n"))
151 151
152 152 def callcatch(ui, func):
153 153 """call func() with global exception handling
154 154
155 155 return func() if no exception happens. otherwise do some error handling
156 156 and return an exit code accordingly. does not handle all exceptions.
157 157 """
158 158 try:
159 159 try:
160 160 return func()
161 161 except: # re-raises
162 162 ui.traceback()
163 163 raise
164 164 # Global exception handling, alphabetically
165 165 # Mercurial-specific first, followed by built-in and library exceptions
166 166 except error.LockHeld as inst:
167 167 if inst.errno == errno.ETIMEDOUT:
168 168 reason = _('timed out waiting for lock held by %r') % inst.locker
169 169 else:
170 170 reason = _('lock held by %r') % inst.locker
171 171 ui.warn(_("abort: %s: %s\n")
172 172 % (inst.desc or stringutil.forcebytestr(inst.filename), reason))
173 173 if not inst.locker:
174 174 ui.warn(_("(lock might be very busy)\n"))
175 175 except error.LockUnavailable as inst:
176 176 ui.warn(_("abort: could not lock %s: %s\n") %
177 177 (inst.desc or stringutil.forcebytestr(inst.filename),
178 178 encoding.strtolocal(inst.strerror)))
179 179 except error.OutOfBandError as inst:
180 180 if inst.args:
181 181 msg = _("abort: remote error:\n")
182 182 else:
183 183 msg = _("abort: remote error\n")
184 184 ui.warn(msg)
185 185 if inst.args:
186 186 ui.warn(''.join(inst.args))
187 187 if inst.hint:
188 188 ui.warn('(%s)\n' % inst.hint)
189 189 except error.RepoError as inst:
190 190 ui.warn(_("abort: %s!\n") % inst)
191 191 if inst.hint:
192 192 ui.warn(_("(%s)\n") % inst.hint)
193 193 except error.ResponseError as inst:
194 194 ui.warn(_("abort: %s") % inst.args[0])
195 195 msg = inst.args[1]
196 196 if isinstance(msg, type(u'')):
197 197 msg = pycompat.sysbytes(msg)
198 198 if not isinstance(msg, bytes):
199 199 ui.warn(" %r\n" % (msg,))
200 200 elif not msg:
201 201 ui.warn(_(" empty string\n"))
202 202 else:
203 203 ui.warn("\n%r\n" % stringutil.ellipsis(msg))
204 204 except error.CensoredNodeError as inst:
205 205 ui.warn(_("abort: file censored %s!\n") % inst)
206 206 except error.RevlogError as inst:
207 207 ui.warn(_("abort: %s!\n") % inst)
208 208 except error.InterventionRequired as inst:
209 209 ui.warn("%s\n" % inst)
210 210 if inst.hint:
211 211 ui.warn(_("(%s)\n") % inst.hint)
212 212 return 1
213 213 except error.WdirUnsupported:
214 214 ui.warn(_("abort: working directory revision cannot be specified\n"))
215 215 except error.Abort as inst:
216 216 ui.warn(_("abort: %s\n") % inst)
217 217 if inst.hint:
218 218 ui.warn(_("(%s)\n") % inst.hint)
219 219 except ImportError as inst:
220 220 ui.warn(_("abort: %s!\n") % stringutil.forcebytestr(inst))
221 221 m = stringutil.forcebytestr(inst).split()[-1]
222 222 if m in "mpatch bdiff".split():
223 223 ui.warn(_("(did you forget to compile extensions?)\n"))
224 224 elif m in "zlib".split():
225 225 ui.warn(_("(is your Python install correct?)\n"))
226 226 except IOError as inst:
227 227 if util.safehasattr(inst, "code"):
228 228 ui.warn(_("abort: %s\n") % stringutil.forcebytestr(inst))
229 229 elif util.safehasattr(inst, "reason"):
230 230 try: # usually it is in the form (errno, strerror)
231 231 reason = inst.reason.args[1]
232 232 except (AttributeError, IndexError):
233 233 # it might be anything, for example a string
234 234 reason = inst.reason
235 235 if isinstance(reason, unicode):
236 236 # SSLError of Python 2.7.9 contains a unicode
237 237 reason = encoding.unitolocal(reason)
238 238 ui.warn(_("abort: error: %s\n") % reason)
239 239 elif (util.safehasattr(inst, "args")
240 240 and inst.args and inst.args[0] == errno.EPIPE):
241 241 pass
242 242 elif getattr(inst, "strerror", None):
243 243 if getattr(inst, "filename", None):
244 244 ui.warn(_("abort: %s: %s\n") % (
245 245 encoding.strtolocal(inst.strerror),
246 246 stringutil.forcebytestr(inst.filename)))
247 247 else:
248 248 ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
249 249 else:
250 250 raise
251 251 except OSError as inst:
252 252 if getattr(inst, "filename", None) is not None:
253 253 ui.warn(_("abort: %s: '%s'\n") % (
254 254 encoding.strtolocal(inst.strerror),
255 255 stringutil.forcebytestr(inst.filename)))
256 256 else:
257 257 ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
258 258 except MemoryError:
259 259 ui.warn(_("abort: out of memory\n"))
260 260 except SystemExit as inst:
261 261 # Commands shouldn't sys.exit directly, but give a return code.
262 262 # Just in case, catch this and pass the exit code to the caller.
263 263 return inst.code
264 264 except socket.error as inst:
265 265 ui.warn(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))
266 266
267 267 return -1
268 268
269 269 def checknewlabel(repo, lbl, kind):
270 270 # Do not use the "kind" parameter in ui output.
271 271 # It makes strings difficult to translate.
272 272 if lbl in ['tip', '.', 'null']:
273 273 raise error.Abort(_("the name '%s' is reserved") % lbl)
274 274 for c in (':', '\0', '\n', '\r'):
275 275 if c in lbl:
276 276 raise error.Abort(
277 277 _("%r cannot be used in a name") % pycompat.bytestr(c))
278 278 try:
279 279 int(lbl)
280 280 raise error.Abort(_("cannot use an integer as a name"))
281 281 except ValueError:
282 282 pass
283 283 if lbl.strip() != lbl:
284 284 raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
285 285
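A rough illustration of the checks above; it assumes an open repo object, and the label values are hypothetical:

    from mercurial import hg, scmutil, ui as uimod

    repo = hg.repository(uimod.ui.load(), b'.')  # assumes cwd is a Mercurial checkout
    scmutil.checknewlabel(repo, b'my-feature', b'bookmark')  # accepted
    scmutil.checknewlabel(repo, b'tip', b'bookmark')         # error.Abort: the name is reserved
    scmutil.checknewlabel(repo, b'123', b'bookmark')         # error.Abort: integer names are rejected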
286 286 def checkfilename(f):
287 287 '''Check that the filename f is an acceptable filename for a tracked file'''
288 288 if '\r' in f or '\n' in f:
289 289 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
290 290
291 291 def checkportable(ui, f):
292 292 '''Check if filename f is portable and warn or abort depending on config'''
293 293 checkfilename(f)
294 294 abort, warn = checkportabilityalert(ui)
295 295 if abort or warn:
296 296 msg = util.checkwinfilename(f)
297 297 if msg:
298 298 msg = "%s: %s" % (msg, procutil.shellquote(f))
299 299 if abort:
300 300 raise error.Abort(msg)
301 301 ui.warn(_("warning: %s\n") % msg)
302 302
303 303 def checkportabilityalert(ui):
304 304 '''check if the user's config requests nothing, a warning, or abort for
305 305 non-portable filenames'''
306 306 val = ui.config('ui', 'portablefilenames')
307 307 lval = val.lower()
308 308 bval = stringutil.parsebool(val)
309 309 abort = pycompat.iswindows or lval == 'abort'
310 310 warn = bval or lval == 'warn'
311 311 if bval is None and not (warn or abort or lval == 'ignore'):
312 312 raise error.ConfigError(
313 313 _("ui.portablefilenames value is invalid ('%s')") % val)
314 314 return abort, warn
315 315
316 316 class casecollisionauditor(object):
317 317 def __init__(self, ui, abort, dirstate):
318 318 self._ui = ui
319 319 self._abort = abort
320 320 allfiles = '\0'.join(dirstate._map)
321 321 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
322 322 self._dirstate = dirstate
323 323 # The purpose of _newfiles is so that we don't complain about
324 324 # case collisions if someone were to call this object with the
325 325 # same filename twice.
326 326 self._newfiles = set()
327 327
328 328 def __call__(self, f):
329 329 if f in self._newfiles:
330 330 return
331 331 fl = encoding.lower(f)
332 332 if fl in self._loweredfiles and f not in self._dirstate:
333 333 msg = _('possible case-folding collision for %s') % f
334 334 if self._abort:
335 335 raise error.Abort(msg)
336 336 self._ui.warn(_("warning: %s\n") % msg)
337 337 self._loweredfiles.add(fl)
338 338 self._newfiles.add(f)
339 339
340 340 def filteredhash(repo, maxrev):
341 341 """build hash of filtered revisions in the current repoview.
342 342
343 343 Multiple caches perform up-to-date validation by checking that the
344 344 tiprev and tipnode stored in the cache file match the current repository.
345 345 However, this is not sufficient for validating repoviews because the set
346 346 of revisions in the view may change without the repository tiprev and
347 347 tipnode changing.
348 348
349 349 This function hashes all the revs filtered from the view and returns
350 350 that SHA-1 digest.
351 351 """
352 352 cl = repo.changelog
353 353 if not cl.filteredrevs:
354 354 return None
355 355 key = None
356 356 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
357 357 if revs:
358 358 s = hashlib.sha1()
359 359 for rev in revs:
360 360 s.update('%d;' % rev)
361 361 key = s.digest()
362 362 return key
363 363
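The digest is easy to reproduce by hand; a sketch with made-up revision numbers:

    import hashlib

    revs = [2, 5, 7]  # hypothetical filtered revisions <= maxrev, already sorted
    s = hashlib.sha1()
    for rev in revs:
        s.update(b'%d;' % rev)
    key = s.digest()  # what filteredhash() would return for this view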
364 364 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
365 365 '''yield every hg repository under path, always recursively.
366 366 The recurse flag will only control recursion into repo working dirs'''
367 367 def errhandler(err):
368 368 if err.filename == path:
369 369 raise err
370 370 samestat = getattr(os.path, 'samestat', None)
371 371 if followsym and samestat is not None:
372 372 def adddir(dirlst, dirname):
373 373 dirstat = os.stat(dirname)
374 374 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
375 375 if not match:
376 376 dirlst.append(dirstat)
377 377 return not match
378 378 else:
379 379 followsym = False
380 380
381 381 if (seen_dirs is None) and followsym:
382 382 seen_dirs = []
383 383 adddir(seen_dirs, path)
384 384 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
385 385 dirs.sort()
386 386 if '.hg' in dirs:
387 387 yield root # found a repository
388 388 qroot = os.path.join(root, '.hg', 'patches')
389 389 if os.path.isdir(os.path.join(qroot, '.hg')):
390 390 yield qroot # we have a patch queue repo here
391 391 if recurse:
392 392 # avoid recursing inside the .hg directory
393 393 dirs.remove('.hg')
394 394 else:
395 395 dirs[:] = [] # don't descend further
396 396 elif followsym:
397 397 newdirs = []
398 398 for d in dirs:
399 399 fname = os.path.join(root, d)
400 400 if adddir(seen_dirs, fname):
401 401 if os.path.islink(fname):
402 402 for hgname in walkrepos(fname, True, seen_dirs):
403 403 yield hgname
404 404 else:
405 405 newdirs.append(d)
406 406 dirs[:] = newdirs
407 407
408 408 def binnode(ctx):
409 409 """Return binary node id for a given basectx"""
410 410 node = ctx.node()
411 411 if node is None:
412 412 return wdirid
413 413 return node
414 414
415 415 def intrev(ctx):
416 416 """Return integer for a given basectx that can be used in comparison or
417 417 arithmetic operation"""
418 418 rev = ctx.rev()
419 419 if rev is None:
420 420 return wdirrev
421 421 return rev
422 422
423 423 def formatchangeid(ctx):
424 424 """Format changectx as '{rev}:{node|formatnode}', which is the default
425 425 template provided by logcmdutil.changesettemplater"""
426 426 repo = ctx.repo()
427 427 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
428 428
429 429 def formatrevnode(ui, rev, node):
430 430 """Format given revision and node depending on the current verbosity"""
431 431 if ui.debugflag:
432 432 hexfunc = hex
433 433 else:
434 434 hexfunc = short
435 435 return '%d:%s' % (rev, hexfunc(node))
436 436
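For instance, the null node renders with the 12-digit short form unless --debug selects the full 40-digit hash; a sketch reusing the repo opened in the earlier example:

    from mercurial.node import nullid

    scmutil.formatrevnode(repo.ui, -1, nullid)  # '-1:000000000000' without --debug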
437 437 def resolvepartialhexnodeid(repo, prefix):
438 438 # Uses unfiltered repo because it's faster when the prefix is ambiguous.
439 439 # This matches the "shortest" template function.
440 440 node = repo.unfiltered().changelog._partialmatch(prefix)
441 441 if node is None:
442 442 return
443 443 repo.changelog.rev(node) # make sure node isn't filtered
444 444 return node
445 445
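A sketch of resolving an abbreviated hash, reusing the repo object from the earlier example (the prefix is hypothetical):

    from mercurial.node import hex

    node = scmutil.resolvepartialhexnodeid(repo, b'35b3')
    if node is not None:     # None means the prefix matched nothing;
        print(hex(node))     # an ambiguous prefix raises a LookupError instead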
446 446 def isrevsymbol(repo, symbol):
447 447 try:
448 448 revsymbol(repo, symbol)
449 449 return True
450 450 except error.RepoLookupError:
451 451 return False
452 452
453 453 def revsymbol(repo, symbol):
454 454 """Returns a context given a single revision symbol (as string).
455 455
456 456 This is similar to revsingle(), but accepts only a single revision symbol,
457 457 i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
458 458 not "max(public())".
459 459 """
460 460 if not isinstance(symbol, bytes):
461 461 msg = ("symbol (%s of type %s) was not a string, did you mean "
462 462 "repo[symbol]?" % (symbol, type(symbol)))
463 463 raise error.ProgrammingError(msg)
464 464 try:
465 465 if symbol in ('.', 'tip', 'null'):
466 466 return repo[symbol]
467 467
468 468 try:
469 469 r = int(symbol)
470 470 if '%d' % r != symbol:
471 471 raise ValueError
472 472 l = len(repo.changelog)
473 473 if r < 0:
474 474 r += l
475 475 if r < 0 or r >= l and r != wdirrev:
476 476 raise ValueError
477 477 return repo[r]
478 478 except error.FilteredIndexError:
479 479 raise
480 480 except (ValueError, OverflowError, IndexError):
481 481 pass
482 482
483 483 if len(symbol) == 40:
484 484 try:
485 485 node = bin(symbol)
486 486 rev = repo.changelog.rev(node)
487 487 return repo[rev]
488 488 except error.FilteredLookupError:
489 489 raise
490 490 except (TypeError, LookupError):
491 491 pass
492 492
493 493 # look up bookmarks through the name interface
494 494 try:
495 495 node = repo.names.singlenode(repo, symbol)
496 496 rev = repo.changelog.rev(node)
497 497 return repo[rev]
498 498 except KeyError:
499 499 pass
500 500
501 node = repo.unfiltered().changelog._partialmatch(symbol)
502 if node is not None:
503 rev = repo.changelog.rev(node)
504 return repo[rev]
505
501 506 return repo[symbol]
502 507
503 508 except error.WdirUnsupported:
504 509 return repo[None]
505 510 except (error.FilteredIndexError, error.FilteredLookupError,
506 511 error.FilteredRepoLookupError):
507 512 raise _filterederror(repo, symbol)
508 513
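With the fallthrough added above, an unambiguous hash prefix now resolves through revsymbol() just like a full 40-digit hash; a sketch reusing the earlier repo object (the prefix is hypothetical):

    ctx = scmutil.revsymbol(repo, b'35b34202')  # partial nodeid, matched via _partialmatch
    scmutil.revsymbol(repo, u'tip')             # wrong: non-bytes input raises ProgrammingError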
509 514 def _filterederror(repo, changeid):
510 515 """build an exception to be raised about a filtered changeid
511 516
512 517 This is extracted in a function to help extensions (eg: evolve) to
513 518 experiment with various message variants."""
514 519 if repo.filtername.startswith('visible'):
515 520
516 521 # Check if the changeset is obsolete
517 522 unfilteredrepo = repo.unfiltered()
518 523 ctx = revsymbol(unfilteredrepo, changeid)
519 524
520 525 # If the changeset is obsolete, enrich the message with the reason
521 526 # that made this changeset not visible
522 527 if ctx.obsolete():
523 528 msg = obsutil._getfilteredreason(repo, changeid, ctx)
524 529 else:
525 530 msg = _("hidden revision '%s'") % changeid
526 531
527 532 hint = _('use --hidden to access hidden revisions')
528 533
529 534 return error.FilteredRepoLookupError(msg, hint=hint)
530 535 msg = _("filtered revision '%s' (not in '%s' subset)")
531 536 msg %= (changeid, repo.filtername)
532 537 return error.FilteredRepoLookupError(msg)
533 538
534 539 def revsingle(repo, revspec, default='.', localalias=None):
535 540 if not revspec and revspec != 0:
536 541 return repo[default]
537 542
538 543 l = revrange(repo, [revspec], localalias=localalias)
539 544 if not l:
540 545 raise error.Abort(_('empty revision set'))
541 546 return repo[l.last()]
542 547
543 548 def _pairspec(revspec):
544 549 tree = revsetlang.parse(revspec)
545 550 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
546 551
547 552 def revpairnodes(repo, revs):
548 553 repo.ui.deprecwarn("revpairnodes is deprecated, please use revpair", "4.6")
549 554 ctx1, ctx2 = revpair(repo, revs)
550 555 return ctx1.node(), ctx2.node()
551 556
552 557 def revpair(repo, revs):
553 558 if not revs:
554 559 return repo['.'], repo[None]
555 560
556 561 l = revrange(repo, revs)
557 562
558 563 if not l:
559 564 first = second = None
560 565 elif l.isascending():
561 566 first = l.min()
562 567 second = l.max()
563 568 elif l.isdescending():
564 569 first = l.max()
565 570 second = l.min()
566 571 else:
567 572 first = l.first()
568 573 second = l.last()
569 574
570 575 if first is None:
571 576 raise error.Abort(_('empty revision range'))
572 577 if (first == second and len(revs) >= 2
573 578 and not all(revrange(repo, [r]) for r in revs)):
574 579 raise error.Abort(_('empty revision on one side of range'))
575 580
576 581 # if top-level is range expression, the result must always be a pair
577 582 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
578 583 return repo[first], repo[None]
579 584
580 585 return repo[first], repo[second]
581 586
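A sketch of the pairing rules, reusing the earlier repo object (revision 2 is assumed to exist):

    ctx1, ctx2 = scmutil.revpair(repo, [])           # (repo['.'], repo[None]): parent vs. working copy
    ctx1, ctx2 = scmutil.revpair(repo, [b'0:tip'])   # the two endpoints of the range
    ctx1, ctx2 = scmutil.revpair(repo, [b'2'])       # single non-range rev: (repo[2], repo[None])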
582 587 def revrange(repo, specs, localalias=None):
583 588 """Execute 1 to many revsets and return the union.
584 589
585 590 This is the preferred mechanism for executing revsets using user-specified
586 591 config options, such as revset aliases.
587 592
588 593 The revsets specified by ``specs`` will be executed via a chained ``OR``
589 594 expression. If ``specs`` is empty, an empty result is returned.
590 595
591 596 ``specs`` can contain integers, in which case they are assumed to be
592 597 revision numbers.
593 598
594 599 It is assumed the revsets are already formatted. If you have arguments
595 600 that need to be expanded in the revset, call ``revsetlang.formatspec()``
596 601 and pass the result as an element of ``specs``.
597 602
598 603 Specifying a single revset is allowed.
599 604
600 605 Returns a ``revset.abstractsmartset`` which is a list-like interface over
601 606 integer revisions.
602 607 """
603 608 allspecs = []
604 609 for spec in specs:
605 610 if isinstance(spec, int):
606 611 spec = revsetlang.formatspec('rev(%d)', spec)
607 612 allspecs.append(spec)
608 613 return repo.anyrevs(allspecs, user=True, localalias=localalias)
609 614
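A minimal sketch, quoting one user-supplied argument and mixing in a raw revision number; it reuses the earlier repo object, and the author name is made up:

    from mercurial import revsetlang

    spec = revsetlang.formatspec(b'author(%s)', b'alice')  # %s quotes the argument safely
    for rev in scmutil.revrange(repo, [spec, 0]):          # the int 0 becomes rev(0)
        print(rev)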
610 615 def meaningfulparents(repo, ctx):
611 616 """Return list of meaningful (or all if debug) parentrevs for rev.
612 617
613 618 For merges (two non-nullrev revisions) both parents are meaningful.
614 619 Otherwise the first parent revision is considered meaningful if it
615 620 is not the preceding revision.
616 621 """
617 622 parents = ctx.parents()
618 623 if len(parents) > 1:
619 624 return parents
620 625 if repo.ui.debugflag:
621 626 return [parents[0], repo['null']]
622 627 if parents[0].rev() >= intrev(ctx) - 1:
623 628 return []
624 629 return parents
625 630
626 631 def expandpats(pats):
627 632 '''Expand bare globs when running on windows.
628 633 On posix we assume it has already been done by sh.'''
629 634 if not util.expandglobs:
630 635 return list(pats)
631 636 ret = []
632 637 for kindpat in pats:
633 638 kind, pat = matchmod._patsplit(kindpat, None)
634 639 if kind is None:
635 640 try:
636 641 globbed = glob.glob(pat)
637 642 except re.error:
638 643 globbed = [pat]
639 644 if globbed:
640 645 ret.extend(globbed)
641 646 continue
642 647 ret.append(kindpat)
643 648 return ret
644 649
645 650 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
646 651 badfn=None):
647 652 '''Return a matcher and the patterns that were used.
648 653 The matcher will warn about bad matches, unless an alternate badfn callback
649 654 is provided.'''
650 655 if pats == ("",):
651 656 pats = []
652 657 if opts is None:
653 658 opts = {}
654 659 if not globbed and default == 'relpath':
655 660 pats = expandpats(pats or [])
656 661
657 662 def bad(f, msg):
658 663 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
659 664
660 665 if badfn is None:
661 666 badfn = bad
662 667
663 668 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
664 669 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
665 670
666 671 if m.always():
667 672 pats = []
668 673 return m, pats
669 674
670 675 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
671 676 badfn=None):
672 677 '''Return a matcher that will warn about bad matches.'''
673 678 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
674 679
675 680 def matchall(repo):
676 681 '''Return a matcher that will efficiently match everything.'''
677 682 return matchmod.always(repo.root, repo.getcwd())
678 683
679 684 def matchfiles(repo, files, badfn=None):
680 685 '''Return a matcher that will efficiently match exactly these files.'''
681 686 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
682 687
683 688 def parsefollowlinespattern(repo, rev, pat, msg):
684 689 """Return a file name from `pat` pattern suitable for usage in followlines
685 690 logic.
686 691 """
687 692 if not matchmod.patkind(pat):
688 693 return pathutil.canonpath(repo.root, repo.getcwd(), pat)
689 694 else:
690 695 ctx = repo[rev]
691 696 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
692 697 files = [f for f in ctx if m(f)]
693 698 if len(files) != 1:
694 699 raise error.ParseError(msg)
695 700 return files[0]
696 701
697 702 def origpath(ui, repo, filepath):
698 703 '''customize where .orig files are created
699 704
700 705 Fetch user defined path from config file: [ui] origbackuppath = <path>
701 706 Fall back to default (filepath with .orig suffix) if not specified
702 707 '''
703 708 origbackuppath = ui.config('ui', 'origbackuppath')
704 709 if not origbackuppath:
705 710 return filepath + ".orig"
706 711
707 712 # Convert filepath from an absolute path into a path inside the repo.
708 713 filepathfromroot = util.normpath(os.path.relpath(filepath,
709 714 start=repo.root))
710 715
711 716 origvfs = vfs.vfs(repo.wjoin(origbackuppath))
712 717 origbackupdir = origvfs.dirname(filepathfromroot)
713 718 if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
714 719 ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))
715 720
716 721 # Remove any files that conflict with the backup file's path
717 722 for f in reversed(list(util.finddirs(filepathfromroot))):
718 723 if origvfs.isfileorlink(f):
719 724 ui.note(_('removing conflicting file: %s\n')
720 725 % origvfs.join(f))
721 726 origvfs.unlink(f)
722 727 break
723 728
724 729 origvfs.makedirs(origbackupdir)
725 730
726 731 if origvfs.isdir(filepathfromroot) and not origvfs.islink(filepathfromroot):
727 732 ui.note(_('removing conflicting directory: %s\n')
728 733 % origvfs.join(filepathfromroot))
729 734 origvfs.rmtree(filepathfromroot, forcibly=True)
730 735
731 736 return origvfs.join(filepathfromroot)
732 737
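For example, with nothing configured the backup for foo/bar.c is foo/bar.c.orig; a sketch of the configured case, reusing the earlier repo object (the path and setting are hypothetical):

    # hgrc:  [ui] origbackuppath = .hg/origbackups
    backup = scmutil.origpath(repo.ui, repo, repo.wjoin(b'foo/bar.c'))
    # -> <repo>/.hg/origbackups/foo/bar.c, with conflicting files or dirs cleared first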
733 738 class _containsnode(object):
734 739 """proxy __contains__(node) to container.__contains__ which accepts revs"""
735 740
736 741 def __init__(self, repo, revcontainer):
737 742 self._torev = repo.changelog.rev
738 743 self._revcontains = revcontainer.__contains__
739 744
740 745 def __contains__(self, node):
741 746 return self._revcontains(self._torev(node))
742 747
743 748 def cleanupnodes(repo, replacements, operation, moves=None, metadata=None):
744 749 """do common cleanups when old nodes are replaced by new nodes
745 750
746 751 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
747 752 (we might also want to move working directory parent in the future)
748 753
749 754 By default, bookmark moves are calculated automatically from 'replacements',
750 755 but 'moves' can be used to override that. Also, 'moves' may include
751 756 additional bookmark moves that should not have associated obsmarkers.
752 757
754 759 replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
754 759 have replacements. operation is a string, like "rebase".
755 760
757 762 metadata is a dictionary containing metadata to be stored in the obsmarker if
757 762 obsolescence is enabled.
758 763 """
759 764 if not replacements and not moves:
760 765 return
761 766
762 767 # translate mapping's other forms
763 768 if not util.safehasattr(replacements, 'items'):
764 769 replacements = {n: () for n in replacements}
765 770
766 771 # Calculate bookmark movements
767 772 if moves is None:
768 773 moves = {}
769 774 # Unfiltered repo is needed since nodes in replacements might be hidden.
770 775 unfi = repo.unfiltered()
771 776 for oldnode, newnodes in replacements.items():
772 777 if oldnode in moves:
773 778 continue
774 779 if len(newnodes) > 1:
775 780 # usually a split, take the one with biggest rev number
776 781 newnode = next(unfi.set('max(%ln)', newnodes)).node()
777 782 elif len(newnodes) == 0:
778 783 # move bookmark backwards
779 784 roots = list(unfi.set('max((::%n) - %ln)', oldnode,
780 785 list(replacements)))
781 786 if roots:
782 787 newnode = roots[0].node()
783 788 else:
784 789 newnode = nullid
785 790 else:
786 791 newnode = newnodes[0]
787 792 moves[oldnode] = newnode
788 793
789 794 with repo.transaction('cleanup') as tr:
790 795 # Move bookmarks
791 796 bmarks = repo._bookmarks
792 797 bmarkchanges = []
793 798 allnewnodes = [n for ns in replacements.values() for n in ns]
794 799 for oldnode, newnode in moves.items():
795 800 oldbmarks = repo.nodebookmarks(oldnode)
796 801 if not oldbmarks:
797 802 continue
798 803 from . import bookmarks # avoid import cycle
799 804 repo.ui.debug('moving bookmarks %r from %s to %s\n' %
800 805 (util.rapply(pycompat.maybebytestr, oldbmarks),
801 806 hex(oldnode), hex(newnode)))
802 807 # Delete divergent bookmarks being parents of related newnodes
803 808 deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
804 809 allnewnodes, newnode, oldnode)
805 810 deletenodes = _containsnode(repo, deleterevs)
806 811 for name in oldbmarks:
807 812 bmarkchanges.append((name, newnode))
808 813 for b in bookmarks.divergent2delete(repo, deletenodes, name):
809 814 bmarkchanges.append((b, None))
810 815
811 816 if bmarkchanges:
812 817 bmarks.applychanges(repo, tr, bmarkchanges)
813 818
814 819 # Obsolete or strip nodes
815 820 if obsolete.isenabled(repo, obsolete.createmarkersopt):
816 821 # If a node is already obsoleted, and we want to obsolete it
817 822 # without a successor, skip that obsolete request since it's
818 823 # unnecessary. That's the "if s or not isobs(n)" check below.
819 824 # Also sort the nodes in topological order; that might be useful for
820 825 # some obsstore logic.
821 826 # NOTE: the filtering and sorting might belong to createmarkers.
822 827 isobs = unfi.obsstore.successors.__contains__
823 828 torev = unfi.changelog.rev
824 829 sortfunc = lambda ns: torev(ns[0])
825 830 rels = [(unfi[n], tuple(unfi[m] for m in s))
826 831 for n, s in sorted(replacements.items(), key=sortfunc)
827 832 if s or not isobs(n)]
828 833 if rels:
829 834 obsolete.createmarkers(repo, rels, operation=operation,
830 835 metadata=metadata)
831 836 else:
832 837 from . import repair # avoid import cycle
833 838 tostrip = list(replacements)
834 839 if tostrip:
835 840 repair.delayedstrip(repo.ui, repo, tostrip, operation)
836 841
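A sketch of the two calling conventions, reusing the earlier repo object (the node variables are hypothetical):

    # oldnode was rewritten into newnode by an "amend"-like operation:
    scmutil.cleanupnodes(repo, {oldnode: [newnode]}, b'amend')
    # nodes with no replacement: bookmarks move backwards, and the nodes are
    # obsoleted (evolution enabled) or stripped (evolution disabled):
    scmutil.cleanupnodes(repo, [prunednode], b'prune')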
837 842 def addremove(repo, matcher, prefix, opts=None):
838 843 if opts is None:
839 844 opts = {}
840 845 m = matcher
841 846 dry_run = opts.get('dry_run')
842 847 try:
843 848 similarity = float(opts.get('similarity') or 0)
844 849 except ValueError:
845 850 raise error.Abort(_('similarity must be a number'))
846 851 if similarity < 0 or similarity > 100:
847 852 raise error.Abort(_('similarity must be between 0 and 100'))
848 853 similarity /= 100.0
849 854
850 855 ret = 0
851 856 join = lambda f: os.path.join(prefix, f)
852 857
853 858 wctx = repo[None]
854 859 for subpath in sorted(wctx.substate):
855 860 submatch = matchmod.subdirmatcher(subpath, m)
856 861 if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
857 862 sub = wctx.sub(subpath)
858 863 try:
859 864 if sub.addremove(submatch, prefix, opts):
860 865 ret = 1
861 866 except error.LookupError:
862 867 repo.ui.status(_("skipping missing subrepository: %s\n")
863 868 % join(subpath))
864 869
865 870 rejected = []
866 871 def badfn(f, msg):
867 872 if f in m.files():
868 873 m.bad(f, msg)
869 874 rejected.append(f)
870 875
871 876 badmatch = matchmod.badmatch(m, badfn)
872 877 added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
873 878 badmatch)
874 879
875 880 unknownset = set(unknown + forgotten)
876 881 toprint = unknownset.copy()
877 882 toprint.update(deleted)
878 883 for abs in sorted(toprint):
879 884 if repo.ui.verbose or not m.exact(abs):
880 885 if abs in unknownset:
881 886 status = _('adding %s\n') % m.uipath(abs)
882 887 else:
883 888 status = _('removing %s\n') % m.uipath(abs)
884 889 repo.ui.status(status)
885 890
886 891 renames = _findrenames(repo, m, added + unknown, removed + deleted,
887 892 similarity)
888 893
889 894 if not dry_run:
890 895 _markchanges(repo, unknown + forgotten, deleted, renames)
891 896
892 897 for f in rejected:
893 898 if f in m.files():
894 899 return 1
895 900 return ret
896 901
897 902 def marktouched(repo, files, similarity=0.0):
898 903 '''Assert that files have somehow been operated upon. files are relative to
899 904 the repo root.'''
900 905 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
901 906 rejected = []
902 907
903 908 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
904 909
905 910 if repo.ui.verbose:
906 911 unknownset = set(unknown + forgotten)
907 912 toprint = unknownset.copy()
908 913 toprint.update(deleted)
909 914 for abs in sorted(toprint):
910 915 if abs in unknownset:
911 916 status = _('adding %s\n') % abs
912 917 else:
913 918 status = _('removing %s\n') % abs
914 919 repo.ui.status(status)
915 920
916 921 renames = _findrenames(repo, m, added + unknown, removed + deleted,
917 922 similarity)
918 923
919 924 _markchanges(repo, unknown + forgotten, deleted, renames)
920 925
921 926 for f in rejected:
922 927 if f in m.files():
923 928 return 1
924 929 return 0
925 930
926 931 def _interestingfiles(repo, matcher):
927 932 '''Walk dirstate with matcher, looking for files that addremove would care
928 933 about.
929 934
930 935 This is different from dirstate.status because it doesn't care about
931 936 whether files are modified or clean.'''
932 937 added, unknown, deleted, removed, forgotten = [], [], [], [], []
933 938 audit_path = pathutil.pathauditor(repo.root, cached=True)
934 939
935 940 ctx = repo[None]
936 941 dirstate = repo.dirstate
937 942 walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
938 943 unknown=True, ignored=False, full=False)
939 944 for abs, st in walkresults.iteritems():
940 945 dstate = dirstate[abs]
941 946 if dstate == '?' and audit_path.check(abs):
942 947 unknown.append(abs)
943 948 elif dstate != 'r' and not st:
944 949 deleted.append(abs)
945 950 elif dstate == 'r' and st:
946 951 forgotten.append(abs)
947 952 # for finding renames
948 953 elif dstate == 'r' and not st:
949 954 removed.append(abs)
950 955 elif dstate == 'a':
951 956 added.append(abs)
952 957
953 958 return added, unknown, deleted, removed, forgotten
954 959
955 960 def _findrenames(repo, matcher, added, removed, similarity):
956 961 '''Find renames from removed files to added ones.'''
957 962 renames = {}
958 963 if similarity > 0:
959 964 for old, new, score in similar.findrenames(repo, added, removed,
960 965 similarity):
961 966 if (repo.ui.verbose or not matcher.exact(old)
962 967 or not matcher.exact(new)):
963 968 repo.ui.status(_('recording removal of %s as rename to %s '
964 969 '(%d%% similar)\n') %
965 970 (matcher.rel(old), matcher.rel(new),
966 971 score * 100))
967 972 renames[new] = old
968 973 return renames
969 974
970 975 def _markchanges(repo, unknown, deleted, renames):
971 976 '''Marks the files in unknown as added, the files in deleted as removed,
972 977 and the files in renames as copied.'''
973 978 wctx = repo[None]
974 979 with repo.wlock():
975 980 wctx.forget(deleted)
976 981 wctx.add(unknown)
977 982 for new, old in renames.iteritems():
978 983 wctx.copy(old, new)
979 984
980 985 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
981 986 """Update the dirstate to reflect the intent of copying src to dst. For
982 987 different reasons it might not end with dst being marked as copied from src.
983 988 """
984 989 origsrc = repo.dirstate.copied(src) or src
985 990 if dst == origsrc: # copying back a copy?
986 991 if repo.dirstate[dst] not in 'mn' and not dryrun:
987 992 repo.dirstate.normallookup(dst)
988 993 else:
989 994 if repo.dirstate[origsrc] == 'a' and origsrc == src:
990 995 if not ui.quiet:
991 996 ui.warn(_("%s has not been committed yet, so no copy "
992 997 "data will be stored for %s.\n")
993 998 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
994 999 if repo.dirstate[dst] in '?r' and not dryrun:
995 1000 wctx.add([dst])
996 1001 elif not dryrun:
997 1002 wctx.copy(origsrc, dst)
998 1003
999 1004 def readrequires(opener, supported):
1000 1005 '''Reads and parses .hg/requires and checks if all entries found
1001 1006 are in the list of supported features.'''
1002 1007 requirements = set(opener.read("requires").splitlines())
1003 1008 missings = []
1004 1009 for r in requirements:
1005 1010 if r not in supported:
1006 1011 if not r or not r[0:1].isalnum():
1007 1012 raise error.RequirementError(_(".hg/requires file is corrupt"))
1008 1013 missings.append(r)
1009 1014 missings.sort()
1010 1015 if missings:
1011 1016 raise error.RequirementError(
1012 1017 _("repository requires features unknown to this Mercurial: %s")
1013 1018 % " ".join(missings),
1014 1019 hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
1015 1020 " for more information"))
1016 1021 return requirements
1017 1022
1018 1023 def writerequires(opener, requirements):
1019 1024 with opener('requires', 'w') as fp:
1020 1025 for r in sorted(requirements):
1021 1026 fp.write("%s\n" % r)
1022 1027
1023 1028 class filecachesubentry(object):
1024 1029 def __init__(self, path, stat):
1025 1030 self.path = path
1026 1031 self.cachestat = None
1027 1032 self._cacheable = None
1028 1033
1029 1034 if stat:
1030 1035 self.cachestat = filecachesubentry.stat(self.path)
1031 1036
1032 1037 if self.cachestat:
1033 1038 self._cacheable = self.cachestat.cacheable()
1034 1039 else:
1035 1040 # None means we don't know yet
1036 1041 self._cacheable = None
1037 1042
1038 1043 def refresh(self):
1039 1044 if self.cacheable():
1040 1045 self.cachestat = filecachesubentry.stat(self.path)
1041 1046
1042 1047 def cacheable(self):
1043 1048 if self._cacheable is not None:
1044 1049 return self._cacheable
1045 1050
1046 1051 # we don't know yet, assume it is for now
1047 1052 return True
1048 1053
1049 1054 def changed(self):
1050 1055 # no point in going further if we can't cache it
1051 1056 if not self.cacheable():
1052 1057 return True
1053 1058
1054 1059 newstat = filecachesubentry.stat(self.path)
1055 1060
1056 1061 # we may not know if it's cacheable yet, check again now
1057 1062 if newstat and self._cacheable is None:
1058 1063 self._cacheable = newstat.cacheable()
1059 1064
1060 1065 # check again
1061 1066 if not self._cacheable:
1062 1067 return True
1063 1068
1064 1069 if self.cachestat != newstat:
1065 1070 self.cachestat = newstat
1066 1071 return True
1067 1072 else:
1068 1073 return False
1069 1074
1070 1075 @staticmethod
1071 1076 def stat(path):
1072 1077 try:
1073 1078 return util.cachestat(path)
1074 1079 except OSError as e:
1075 1080 if e.errno != errno.ENOENT:
1076 1081 raise
1077 1082
1078 1083 class filecacheentry(object):
1079 1084 def __init__(self, paths, stat=True):
1080 1085 self._entries = []
1081 1086 for path in paths:
1082 1087 self._entries.append(filecachesubentry(path, stat))
1083 1088
1084 1089 def changed(self):
1085 1090 '''true if any entry has changed'''
1086 1091 for entry in self._entries:
1087 1092 if entry.changed():
1088 1093 return True
1089 1094 return False
1090 1095
1091 1096 def refresh(self):
1092 1097 for entry in self._entries:
1093 1098 entry.refresh()
1094 1099
1095 1100 class filecache(object):
1096 1101 '''A property like decorator that tracks files under .hg/ for updates.
1097 1102
1098 1103 Records stat info when called in _filecache.
1099 1104
1100 1105 On subsequent calls, compares old stat info with new info, and recreates the
1101 1106 object when any of the files changes, updating the new stat info in
1102 1107 _filecache.
1103 1108
1104 1109 Mercurial either atomically renames or appends files under .hg,
1105 1110 so to ensure the cache is reliable we need the filesystem to be able
1106 1111 to tell us if a file has been replaced. If it can't, we fall back to
1107 1112 recreating the object on every call (essentially the same behavior as
1108 1113 propertycache).
1109 1114
1110 1115 '''
1111 1116 def __init__(self, *paths):
1112 1117 self.paths = paths
1113 1118
1114 1119 def join(self, obj, fname):
1115 1120 """Used to compute the runtime path of a cached file.
1116 1121
1117 1122 Users should subclass filecache and provide their own version of this
1118 1123 function to call the appropriate join function on 'obj' (an instance
1119 1124 of the class that its member function was decorated).
1120 1125 """
1121 1126 raise NotImplementedError
1122 1127
1123 1128 def __call__(self, func):
1124 1129 self.func = func
1125 1130 self.name = func.__name__.encode('ascii')
1126 1131 return self
1127 1132
1128 1133 def __get__(self, obj, type=None):
1129 1134 # if accessed on the class, return the descriptor itself.
1130 1135 if obj is None:
1131 1136 return self
1132 1137 # do we need to check if the file changed?
1133 1138 if self.name in obj.__dict__:
1134 1139 assert self.name in obj._filecache, self.name
1135 1140 return obj.__dict__[self.name]
1136 1141
1137 1142 entry = obj._filecache.get(self.name)
1138 1143
1139 1144 if entry:
1140 1145 if entry.changed():
1141 1146 entry.obj = self.func(obj)
1142 1147 else:
1143 1148 paths = [self.join(obj, path) for path in self.paths]
1144 1149
1145 1150 # We stat -before- creating the object so our cache doesn't lie if
1146 1151 # a writer modified between the time we read and stat
1147 1152 entry = filecacheentry(paths, True)
1148 1153 entry.obj = self.func(obj)
1149 1154
1150 1155 obj._filecache[self.name] = entry
1151 1156
1152 1157 obj.__dict__[self.name] = entry.obj
1153 1158 return entry.obj
1154 1159
1155 1160 def __set__(self, obj, value):
1156 1161 if self.name not in obj._filecache:
1157 1162 # we add an entry for the missing value because X in __dict__
1158 1163 # implies X in _filecache
1159 1164 paths = [self.join(obj, path) for path in self.paths]
1160 1165 ce = filecacheentry(paths, False)
1161 1166 obj._filecache[self.name] = ce
1162 1167 else:
1163 1168 ce = obj._filecache[self.name]
1164 1169
1165 1170 ce.obj = value # update cached copy
1166 1171 obj.__dict__[self.name] = value # update copy returned by obj.x
1167 1172
1168 1173 def __delete__(self, obj):
1169 1174 try:
1170 1175 del obj.__dict__[self.name]
1171 1176 except KeyError:
1172 1177 raise AttributeError(self.name)
1173 1178
1174 1179 def extdatasource(repo, source):
1175 1180 """Gather a map of rev -> value dict from the specified source
1176 1181
1177 1182 A source spec is treated as a URL, with a special case shell: type
1178 1183 for parsing the output from a shell command.
1179 1184
1180 1185 The data is parsed as a series of newline-separated records where
1181 1186 each record is a revision specifier optionally followed by a space
1182 1187 and a freeform string value. If the revision is known locally, it
1183 1188 is converted to a rev, otherwise the record is skipped.
1184 1189
1185 1190 Note that both key and value are treated as UTF-8 and converted to
1186 1191 the local encoding. This allows uniformity between local and
1187 1192 remote data sources.
1188 1193 """
1189 1194
1190 1195 spec = repo.ui.config("extdata", source)
1191 1196 if not spec:
1192 1197 raise error.Abort(_("unknown extdata source '%s'") % source)
1193 1198
1194 1199 data = {}
1195 1200 src = proc = None
1196 1201 try:
1197 1202 if spec.startswith("shell:"):
1198 1203 # external commands should be run relative to the repo root
1199 1204 cmd = spec[6:]
1200 1205 proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
1201 1206 close_fds=procutil.closefds,
1202 1207 stdout=subprocess.PIPE, cwd=repo.root)
1203 1208 src = proc.stdout
1204 1209 else:
1205 1210 # treat as a URL or file
1206 1211 src = url.open(repo.ui, spec)
1207 1212 for l in src:
1208 1213 if " " in l:
1209 1214 k, v = l.strip().split(" ", 1)
1210 1215 else:
1211 1216 k, v = l.strip(), ""
1212 1217
1213 1218 k = encoding.tolocal(k)
1214 1219 try:
1215 1220 data[revsingle(repo, k).rev()] = encoding.tolocal(v)
1216 1221 except (error.LookupError, error.RepoLookupError):
1217 1222 pass # we ignore data for nodes that don't exist locally
1218 1223 finally:
1219 1224 if proc:
1220 1225 proc.communicate()
1221 1226 if src:
1222 1227 src.close()
1223 1228 if proc and proc.returncode != 0:
1224 1229 raise error.Abort(_("extdata command '%s' failed: %s")
1225 1230 % (cmd, procutil.explainexit(proc.returncode)))
1226 1231
1227 1232 return data
1228 1233
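A sketch of wiring up a source, reusing the earlier repo object (the section name and command are hypothetical):

    # hgrc:
    #   [extdata]
    #   bugrefs = shell:cat .hg/bugrefs
    # where each line of the command's output is "<revspec> <value>":
    data = scmutil.extdatasource(repo, b'bugrefs')  # -> {rev: b'value', ...}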
1229 1234 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1230 1235 if lock is None:
1231 1236 raise error.LockInheritanceContractViolation(
1232 1237 'lock can only be inherited while held')
1233 1238 if environ is None:
1234 1239 environ = {}
1235 1240 with lock.inherit() as locker:
1236 1241 environ[envvar] = locker
1237 1242 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1238 1243
1239 1244 def wlocksub(repo, cmd, *args, **kwargs):
1240 1245 """run cmd as a subprocess that allows inheriting repo's wlock
1241 1246
1242 1247 This can only be called while the wlock is held. This takes all the
1243 1248 arguments that ui.system does, and returns the exit code of the
1244 1249 subprocess."""
1245 1250 return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
1246 1251 **kwargs)
1247 1252
1248 1253 def gdinitconfig(ui):
1249 1254 """helper function to know if a repo should be created as general delta
1250 1255 """
1251 1256 # experimental config: format.generaldelta
1252 1257 return (ui.configbool('format', 'generaldelta')
1253 1258 or ui.configbool('format', 'usegeneraldelta'))
1254 1259
1255 1260 def gddeltaconfig(ui):
1256 1261 """helper function to know if incoming delta should be optimised
1257 1262 """
1258 1263 # experimental config: format.generaldelta
1259 1264 return ui.configbool('format', 'generaldelta')
1260 1265
1261 1266 class simplekeyvaluefile(object):
1262 1267 """A simple file with key=value lines
1263 1268
1264 1269 Keys must be alphanumerics and start with a letter, values must not
1265 1270 contain '\n' characters"""
1266 1271 firstlinekey = '__firstline'
1267 1272
1268 1273 def __init__(self, vfs, path, keys=None):
1269 1274 self.vfs = vfs
1270 1275 self.path = path
1271 1276
1272 1277 def read(self, firstlinenonkeyval=False):
1273 1278 """Read the contents of a simple key-value file
1274 1279
1275 1280 'firstlinenonkeyval' indicates whether the first line of file should
1276 1281 be treated as a key-value pair or returned fully under the
1277 1282 __firstline key."""
1278 1283 lines = self.vfs.readlines(self.path)
1279 1284 d = {}
1280 1285 if firstlinenonkeyval:
1281 1286 if not lines:
1282 1287 e = _("empty simplekeyvalue file")
1283 1288 raise error.CorruptedState(e)
1284 1289 # we don't want to include '\n' in the __firstline
1285 1290 d[self.firstlinekey] = lines[0][:-1]
1286 1291 del lines[0]
1287 1292
1288 1293 try:
1289 1294 # the 'if line.strip()' part prevents us from failing on empty
1290 1295 # lines which only contain '\n' and therefore are not skipped
1291 1296 # by 'if line'
1292 1297 updatedict = dict(line[:-1].split('=', 1) for line in lines
1293 1298 if line.strip())
1294 1299 if self.firstlinekey in updatedict:
1295 1300 e = _("%r can't be used as a key")
1296 1301 raise error.CorruptedState(e % self.firstlinekey)
1297 1302 d.update(updatedict)
1298 1303 except ValueError as e:
1299 1304 raise error.CorruptedState(str(e))
1300 1305 return d
1301 1306
1302 1307 def write(self, data, firstline=None):
1303 1308 """Write key=>value mapping to a file
1304 1309 data is a dict. Keys must be alphanumerical and start with a letter.
1305 1310 Values must not contain newline characters.
1306 1311
1307 1312 If 'firstline' is not None, it is written to file before
1308 1313 everything else, as it is, not in a key=value form"""
1309 1314 lines = []
1310 1315 if firstline is not None:
1311 1316 lines.append('%s\n' % firstline)
1312 1317
1313 1318 for k, v in data.items():
1314 1319 if k == self.firstlinekey:
1315 1320 e = "key name '%s' is reserved" % self.firstlinekey
1316 1321 raise error.ProgrammingError(e)
1317 1322 if not k[0:1].isalpha():
1318 1323 e = "keys must start with a letter in a key-value file"
1319 1324 raise error.ProgrammingError(e)
1320 1325 if not k.isalnum():
1321 1326 e = "invalid key name in a simple key-value file"
1322 1327 raise error.ProgrammingError(e)
1323 1328 if '\n' in v:
1324 1329 e = "invalid value in a simple key-value file"
1325 1330 raise error.ProgrammingError(e)
1326 1331 lines.append("%s=%s\n" % (k, v))
1327 1332 with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
1328 1333 fp.write(''.join(lines))
1329 1334
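A round-trip sketch; the directory and keys are made up:

    from mercurial import scmutil, vfs as vfsmod

    v = vfsmod.vfs(b'/tmp/kvdemo', audit=False)
    kv = scmutil.simplekeyvaluefile(v, b'state')
    kv.write({b'version': b'1', b'command': b'rebase'}, firstline=b'header')
    kv.read(firstlinenonkeyval=True)  # {b'__firstline': b'header', b'version': b'1', ...}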
1330 1335 _reportobsoletedsource = [
1331 1336 'debugobsolete',
1332 1337 'pull',
1333 1338 'push',
1334 1339 'serve',
1335 1340 'unbundle',
1336 1341 ]
1337 1342
1338 1343 _reportnewcssource = [
1339 1344 'pull',
1340 1345 'unbundle',
1341 1346 ]
1342 1347
1343 1348 # a list of (repo, ctx, files) functions called by various commands to allow
1344 1349 # extensions to ensure the corresponding files are available locally, before the
1345 1350 # command uses them.
1346 1351 fileprefetchhooks = util.hooks()
1347 1352
1348 1353 # A marker that tells the evolve extension to suppress its own reporting
1349 1354 _reportstroubledchangesets = True
1350 1355
1351 1356 def registersummarycallback(repo, otr, txnname=''):
1352 1357 """register a callback to issue a summary after the transaction is closed
1353 1358 """
1354 1359 def txmatch(sources):
1355 1360 return any(txnname.startswith(source) for source in sources)
1356 1361
1357 1362 categories = []
1358 1363
1359 1364 def reportsummary(func):
1360 1365 """decorator for report callbacks."""
1361 1366 # The repoview life cycle is shorter than the one of the actual
1362 1367 # underlying repository. So the filtered object can die before the
1363 1368 # weakref is used leading to troubles. We keep a reference to the
1364 1369 # unfiltered object and restore the filtering when retrieving the
1365 1370 # repository through the weakref.
1366 1371 filtername = repo.filtername
1367 1372 reporef = weakref.ref(repo.unfiltered())
1368 1373 def wrapped(tr):
1369 1374 repo = reporef()
1370 1375 if filtername:
1371 1376 repo = repo.filtered(filtername)
1372 1377 func(repo, tr)
1373 1378 newcat = '%02i-txnreport' % len(categories)
1374 1379 otr.addpostclose(newcat, wrapped)
1375 1380 categories.append(newcat)
1376 1381 return wrapped
1377 1382
1378 1383 if txmatch(_reportobsoletedsource):
1379 1384 @reportsummary
1380 1385 def reportobsoleted(repo, tr):
1381 1386 obsoleted = obsutil.getobsoleted(repo, tr)
1382 1387 if obsoleted:
1383 1388 repo.ui.status(_('obsoleted %i changesets\n')
1384 1389 % len(obsoleted))
1385 1390
1386 1391 if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
1387 1392 repo.ui.configbool('experimental', 'evolution.report-instabilities')):
1388 1393 instabilitytypes = [
1389 1394 ('orphan', 'orphan'),
1390 1395 ('phase-divergent', 'phasedivergent'),
1391 1396 ('content-divergent', 'contentdivergent'),
1392 1397 ]
1393 1398
1394 1399 def getinstabilitycounts(repo):
1395 1400 filtered = repo.changelog.filteredrevs
1396 1401 counts = {}
1397 1402 for instability, revset in instabilitytypes:
1398 1403 counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
1399 1404 filtered)
1400 1405 return counts
1401 1406
1402 1407 oldinstabilitycounts = getinstabilitycounts(repo)
1403 1408 @reportsummary
1404 1409 def reportnewinstabilities(repo, tr):
1405 1410 newinstabilitycounts = getinstabilitycounts(repo)
1406 1411 for instability, revset in instabilitytypes:
1407 1412 delta = (newinstabilitycounts[instability] -
1408 1413 oldinstabilitycounts[instability])
1409 1414 if delta > 0:
1410 1415 repo.ui.warn(_('%i new %s changesets\n') %
1411 1416 (delta, instability))
1412 1417
1413 1418 if txmatch(_reportnewcssource):
1414 1419 @reportsummary
1415 1420 def reportnewcs(repo, tr):
1416 1421 """Report the range of new revisions pulled/unbundled."""
1417 1422 newrevs = tr.changes.get('revs', xrange(0, 0))
1418 1423 if not newrevs:
1419 1424 return
1420 1425
1421 1426 # Compute the bounds of new revisions' range, excluding obsoletes.
1422 1427 unfi = repo.unfiltered()
1423 1428 revs = unfi.revs('%ld and not obsolete()', newrevs)
1424 1429 if not revs:
1425 1430 # Got only obsoletes.
1426 1431 return
1427 1432 minrev, maxrev = repo[revs.min()], repo[revs.max()]
1428 1433
1429 1434 if minrev == maxrev:
1430 1435 revrange = minrev
1431 1436 else:
1432 1437 revrange = '%s:%s' % (minrev, maxrev)
1433 1438 repo.ui.status(_('new changesets %s\n') % revrange)
1434 1439
1435 1440 def nodesummaries(repo, nodes, maxnumnodes=4):
1436 1441 if len(nodes) <= maxnumnodes or repo.ui.verbose:
1437 1442 return ' '.join(short(h) for h in nodes)
1438 1443 first = ' '.join(short(h) for h in nodes[:maxnumnodes])
1439 1444 return _("%s and %d others") % (first, len(nodes) - maxnumnodes)
1440 1445
1441 1446 def enforcesinglehead(repo, tr, desc):
1442 1447 """check that no named branch has multiple heads"""
1443 1448 if desc in ('strip', 'repair'):
1444 1449 # skip the logic during strip
1445 1450 return
1446 1451 visible = repo.filtered('visible')
1447 1452 # possible improvement: we could restrict the check to affected branch
1448 1453 for name, heads in visible.branchmap().iteritems():
1449 1454 if len(heads) > 1:
1450 1455 msg = _('rejecting multiple heads on branch "%s"')
1451 1456 msg %= name
1452 1457 hint = _('%d heads: %s')
1453 1458 hint %= (len(heads), nodesummaries(repo, heads))
1454 1459 raise error.Abort(msg, hint=hint)
1455 1460
1456 1461 def wrapconvertsink(sink):
1457 1462 """Allow extensions to wrap the sink returned by convcmd.convertsink()
1458 1463 before it is used, whether or not the convert extension was formally loaded.
1459 1464 """
1460 1465 return sink
1461 1466
1462 1467 def unhidehashlikerevs(repo, specs, hiddentype):
1463 1468 """parse the user specs and unhide changesets whose hash or revision number
1464 1469 is passed.
1465 1470
1466 1471 hiddentype can be: 1) 'warn': warn while unhiding changesets
1467 1472 2) 'nowarn': don't warn while unhiding changesets
1468 1473
1469 1474 returns a repo object with the required changesets unhidden
1470 1475 """
1471 1476 if not repo.filtername or not repo.ui.configbool('experimental',
1472 1477 'directaccess'):
1473 1478 return repo
1474 1479
1475 1480 if repo.filtername not in ('visible', 'visible-hidden'):
1476 1481 return repo
1477 1482
1478 1483 symbols = set()
1479 1484 for spec in specs:
1480 1485 try:
1481 1486 tree = revsetlang.parse(spec)
1482 1487 except error.ParseError: # will be reported by scmutil.revrange()
1483 1488 continue
1484 1489
1485 1490 symbols.update(revsetlang.gethashlikesymbols(tree))
1486 1491
1487 1492 if not symbols:
1488 1493 return repo
1489 1494
1490 1495 revs = _getrevsfromsymbols(repo, symbols)
1491 1496
1492 1497 if not revs:
1493 1498 return repo
1494 1499
1495 1500 if hiddentype == 'warn':
1496 1501 unfi = repo.unfiltered()
1497 1502 revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
1498 1503 repo.ui.warn(_("warning: accessing hidden changesets for write "
1499 1504 "operation: %s\n") % revstr)
1500 1505
1501 1506 # we have to use a new filtername to separate branch/tags caches until we can
1502 1507 # disable these caches when revisions are dynamically pinned.
1503 1508 return repo.filtered('visible-hidden', revs)
1504 1509
1505 1510 def _getrevsfromsymbols(repo, symbols):
1506 1511 """parse the list of symbols and returns a set of revision numbers of hidden
1507 1512 changesets present in symbols"""
1508 1513 revs = set()
1509 1514 unfi = repo.unfiltered()
1510 1515 unficl = unfi.changelog
1511 1516 cl = repo.changelog
1512 1517 tiprev = len(unficl)
1513 1518 pmatch = unficl._partialmatch
1514 1519 allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
1515 1520 for s in symbols:
1516 1521 try:
1517 1522 n = int(s)
1518 1523 if n <= tiprev:
1519 1524 if not allowrevnums:
1520 1525 continue
1521 1526 else:
1522 1527 if n not in cl:
1523 1528 revs.add(n)
1524 1529 continue
1525 1530 except ValueError:
1526 1531 pass
1527 1532
1528 1533 try:
1529 1534 s = pmatch(s)
1530 1535 except (error.LookupError, error.WdirUnsupported):
1531 1536 s = None
1532 1537
1533 1538 if s is not None:
1534 1539 rev = unficl.rev(s)
1535 1540 if rev not in cl:
1536 1541 revs.add(rev)
1537 1542
1538 1543 return revs