##// END OF EJS Templates
scmutil: factor out building of transaction summary callback...
Denis Laxalde -
r34621:b799f116 default
parent child Browse files
Show More
@@ -1,1218 +1,1228 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import hashlib
13 13 import os
14 14 import re
15 15 import socket
16 16 import subprocess
17 17 import weakref
18 18
19 19 from .i18n import _
20 20 from .node import (
21 21 hex,
22 22 nullid,
23 23 short,
24 24 wdirid,
25 25 wdirrev,
26 26 )
27 27
28 28 from . import (
29 29 encoding,
30 30 error,
31 31 match as matchmod,
32 32 obsolete,
33 33 obsutil,
34 34 pathutil,
35 35 phases,
36 36 pycompat,
37 37 revsetlang,
38 38 similar,
39 39 url,
40 40 util,
41 41 vfs,
42 42 )
43 43
44 44 if pycompat.osname == 'nt':
45 45 from . import scmwindows as scmplatform
46 46 else:
47 47 from . import scmposix as scmplatform
48 48
49 49 termsize = scmplatform.termsize
50 50
class status(tuple):
    '''Named tuple with a list of files per status. The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        items = (modified, added, removed, deleted, unknown, ignored, clean)
        return tuple.__new__(cls, items)

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        template = ('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                    'unknown=%r, ignored=%r, clean=%r>')
        return template % self
103 103
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Map each subpath to the context it should be read from, preferring
    # ctx1.  The subpaths from ctx2 matter when the .hgsub file has been
    # modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    # subpaths only present in ctx2 get special handling below
    missing = set(s for s in ctx2.substate if s not in ctx1.substate)
    for subpath in missing:
        del subpaths[subpath]

    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
128 128
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    for n in excluded or []:
        ctx = repo[n]
        # count changesets that were left out only because they are secret
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secretlist.append(n)

    if not secretlist:
        ui.status(_("no changes found\n"))
    else:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
145 145
def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.
    """
    try:
        try:
            return func()
        except: # re-raises
            # record the traceback (when ui.traceback is enabled) before
            # the handlers below collapse the exception into a message
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        if inst.errno == errno.ETIMEDOUT:
            reason = _('timed out waiting for lock held by %r') % inst.locker
        else:
            reason = _('lock held by %r') % inst.locker
        ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason))
        if not inst.locker:
            ui.warn(_("(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        ui.warn(_("abort: could not lock %s: %s\n") %
                (inst.desc or inst.filename,
                 encoding.strtolocal(inst.strerror)))
    except error.OutOfBandError as inst:
        if inst.args:
            msg = _("abort: remote error:\n")
        else:
            msg = _("abort: remote error\n")
        ui.warn(msg)
        if inst.args:
            ui.warn(''.join(inst.args))
        if inst.hint:
            ui.warn('(%s)\n' % inst.hint)
    except error.RepoError as inst:
        ui.warn(_("abort: %s!\n") % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.warn(_("abort: %s") % inst.args[0])
        # basestring: this module still targets Python 2
        if not isinstance(inst.args[1], basestring):
            ui.warn(" %r\n" % (inst.args[1],))
        elif not inst.args[1]:
            ui.warn(_(" empty string\n"))
        else:
            ui.warn("\n%r\n" % util.ellipsis(inst.args[1]))
    except error.CensoredNodeError as inst:
        ui.warn(_("abort: file censored %s!\n") % inst)
    except error.RevlogError as inst:
        ui.warn(_("abort: %s!\n") % inst)
    except error.InterventionRequired as inst:
        ui.warn("%s\n" % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
        # 1 (not -1): the command stopped for user interaction, it did not
        # fail outright
        return 1
    except error.WdirUnsupported:
        ui.warn(_("abort: working directory revision cannot be specified\n"))
    except error.Abort as inst:
        ui.warn(_("abort: %s\n") % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
    except ImportError as inst:
        ui.warn(_("abort: %s!\n") % inst)
        # last word of the message is the module that failed to import
        m = str(inst).split()[-1]
        if m in "mpatch bdiff".split():
            ui.warn(_("(did you forget to compile extensions?)\n"))
        elif m in "zlib".split():
            ui.warn(_("(is your Python install correct?)\n"))
    except IOError as inst:
        if util.safehasattr(inst, "code"):
            # presumably an HTTP error with a status code — TODO confirm
            ui.warn(_("abort: %s\n") % inst)
        elif util.safehasattr(inst, "reason"):
            # presumably a URLError wrapping the underlying error
            try: # usually it is in the form (errno, strerror)
                reason = inst.reason.args[1]
            except (AttributeError, IndexError):
                # it might be anything, for example a string
                reason = inst.reason
            if isinstance(reason, unicode):
                # SSLError of Python 2.7.9 contains a unicode
                reason = encoding.unitolocal(reason)
            ui.warn(_("abort: error: %s\n") % reason)
        elif (util.safehasattr(inst, "args")
              and inst.args and inst.args[0] == errno.EPIPE):
            # broken pipe (e.g. the consumer of our output went away):
            # stay silent
            pass
        elif getattr(inst, "strerror", None):
            if getattr(inst, "filename", None):
                ui.warn(_("abort: %s: %s\n") % (
                    encoding.strtolocal(inst.strerror), inst.filename))
            else:
                ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
        else:
            # not an IOError shape we know how to report; let it propagate
            raise
    except OSError as inst:
        if getattr(inst, "filename", None) is not None:
            ui.warn(_("abort: %s: '%s'\n") % (
                encoding.strtolocal(inst.strerror), inst.filename))
        else:
            ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
    except MemoryError:
        ui.warn(_("abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case catch this and and pass exit code to caller.
        return inst.code
    except socket.error as inst:
        ui.warn(_("abort: %s\n") % inst.args[-1])

    return -1
256 256
def checknewlabel(repo, lbl, kind):
    '''Abort if lbl is not acceptable as a new label name.

    Note: do not use the "kind" parameter in ui output.
    It makes strings difficult to translate.
    '''
    if lbl in ('tip', '.', 'null'):
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    for c in (':', '\0', '\n', '\r'):
        if c in lbl:
            raise error.Abort(_("%r cannot be used in a name") % c)
    try:
        int(lbl)
    except ValueError:
        return
    raise error.Abort(_("cannot use an integer as a name"))
270 270
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if '\n' in f or '\r' in f:
        raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
275 275
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %s" % (msg, util.shellquote(f))
    if abort:
        raise error.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
287 287
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames')
    lval = val.lower()
    bval = util.parsebool(val)
    # Windows always aborts on non-portable names
    abort = lval == 'abort' or pycompat.osname == 'nt'
    warn = bval or lval == 'warn'
    if bval is None and not (warn or abort or lval == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
300 300
class casecollisionauditor(object):
    '''Warn (or abort) when a new file would collide, after case folding,
    with a file already tracked in the dirstate.'''

    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        joined = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(joined).split('\0'))
        self._dirstate = dirstate
        # Files already audited through this object; auditing the same
        # name twice must not report a collision with itself.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        lowered = encoding.lower(f)
        if lowered in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(lowered)
        self._newfiles.add(f)
324 324
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if not revs:
        return None
    s = hashlib.sha1()
    for rev in revs:
        s.update('%d;' % rev)
    return s.digest()
348 348
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def onwalkerror(err):
        # errors on the top-level path are fatal; others are ignored
        if err.filename == path:
            raise err

    samestat = getattr(os.path, 'samestat', None)
    if not followsym or samestat is None:
        # symlink loop detection needs os.path.samestat, which is not
        # available on every platform
        followsym = False
    else:
        def adddir(statlist, dirname):
            # record dirname's stat; return False when it was seen before
            dirstat = os.stat(dirname)
            for seenstat in statlist:
                if samestat(dirstat, seenstat):
                    return False
            statlist.append(dirstat)
            return True

    if followsym and seen_dirs is None:
        seen_dirs = []
        adddir(seen_dirs, path)

    for root, dirs, files in os.walk(path, topdown=True, onerror=onwalkerror):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
396 396
def binnode(ctx):
    """Return binary node id for a given basectx"""
    node = ctx.node()
    # the working directory has no real node; use the magic wdir id
    return wdirid if node is None else node
403 403
def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    rev = ctx.rev()
    # the working directory has no real revision; use the magic wdir rev
    return wdirrev if rev is None else rev
411 411
def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by cmdutil.changeset_templater"""
    return formatrevnode(ctx.repo().ui, intrev(ctx), binnode(ctx))
417 417
def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    # full hex hash in debug mode, short form otherwise
    hexfunc = hex if ui.debugflag else short
    return '%d:%s' % (rev, hexfunc(node))
425 425
def revsingle(repo, revspec, default='.', localalias=None):
    '''Resolve a single revspec to a changectx, falling back to default
    when revspec is empty (but not the integer 0).'''
    if not revspec and revspec != 0:
        return repo[default]

    revs = revrange(repo, [revspec], localalias=localalias)
    if not revs:
        raise error.Abort(_('empty revision set'))
    return repo[revs.last()]
434 434
def _pairspec(revspec):
    '''True if revspec parses as a top-level range expression.'''
    tree = revsetlang.parse(revspec)
    return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
438 438
def revpair(repo, revs):
    # Resolve user-supplied revision specs to a pair of binary nodes.
    # The second element is None when a single revision (and not a range
    # expression) was specified.
    if not revs:
        # no revision given: working directory's first parent vs. itself
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    if not l:
        first = second = None
    elif l.isascending():
        # endpoints of an ascending smartset: smallest rev is the start
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        # unordered set: fall back to iteration order
        first = l.first()
        second = l.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        # e.g. "-r good -r empty": one of the specs resolved to nothing
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)
468 468
def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    allspecs = [revsetlang.formatspec('rev(%d)', spec)
                if isinstance(spec, int) else spec
                for spec in specs]
    return repo.anyrevs(allspecs, user=True, localalias=localalias)
496 496
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    ps = ctx.parents()
    if len(ps) > 1:
        return ps
    if repo.ui.debugflag:
        # debug output always shows both parent slots
        return [ps[0], repo['null']]
    return [] if ps[0].rev() >= intrev(ctx) - 1 else ps
512 512
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # explicit pattern kind ("glob:", "re:", ...): leave as-is
            ret.append(kindpat)
            continue
        try:
            globbed = glob.glob(pat)
        except re.error:
            globbed = [pat]
        if globbed:
            ret.extend(globbed)
        else:
            # no match on disk: keep the original pattern
            ret.append(kindpat)
    return ret
531 531
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    def bad(f, msg):
        # NOTE: closes over 'm', which is only assigned below -- safe
        # because the callback cannot fire before the matcher exists
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        # match-everything matcher: report that no patterns were used
        pats = []
    return m, pats
556 556
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    m, _pats = matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)
    return m
561 561
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    root, cwd = repo.root, repo.getcwd()
    return matchmod.always(root, cwd)
565 565
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    root, cwd = repo.root, repo.getcwd()
    return matchmod.exact(root, cwd, files, badfn=badfn)
569 569
def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified
    '''
    origbackuppath = ui.config('ui', 'origbackuppath')
    if not origbackuppath:
        return filepath + ".orig"

    # Convert filepath from an absolute path into a path inside the repo.
    filepathfromroot = util.normpath(os.path.relpath(filepath,
                                                     start=repo.root))

    origvfs = vfs.vfs(repo.wjoin(origbackuppath))
    origbackupdir = origvfs.dirname(filepathfromroot)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        # (walk the ancestor directories leaf-first and unlink the first
        # one that exists as a file or symlink)
        for f in reversed(list(util.finddirs(filepathfromroot))):
            if origvfs.isfileorlink(f):
                ui.note(_('removing conflicting file: %s\n')
                        % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    if origvfs.isdir(filepathfromroot):
        # a directory occupies the exact backup path: clear it out
        ui.note(_('removing conflicting directory: %s\n')
                % origvfs.join(filepathfromroot))
        origvfs.rmtree(filepathfromroot, forcibly=True)

    return origvfs.join(filepathfromroot)
605 605
606 606 class _containsnode(object):
607 607 """proxy __contains__(node) to container.__contains__ which accepts revs"""
608 608
609 609 def __init__(self, repo, revcontainer):
610 610 self._torev = repo.changelog.rev
611 611 self._revcontains = revcontainer.__contains__
612 612
613 613 def __contains__(self, node):
614 614 return self._revcontains(self._torev(node))
615 615
def cleanupnodes(repo, replacements, operation, moves=None):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or a iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".
    """
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, 'items'):
        replacements = {n: () for n in replacements}

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()
    for oldnode, newnodes in replacements.items():
        if oldnode in moves:
            # explicit move given by the caller takes precedence
            continue
        if len(newnodes) > 1:
            # usually a split, take the one with biggest rev number
            newnode = next(unfi.set('max(%ln)', newnodes)).node()
        elif len(newnodes) == 0:
            # move bookmark backwards
            roots = list(unfi.set('max((::%n) - %ln)', oldnode,
                                  list(replacements)))
            if roots:
                newnode = roots[0].node()
            else:
                newnode = nullid
        else:
            newnode = newnodes[0]
        moves[oldnode] = newnode

    with repo.transaction('cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        allnewnodes = [n for ns in replacements.values() for n in ns]
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks # avoid import cycle
            repo.ui.debug('moving bookmarks %r from %s to %s\n' %
                          (oldbmarks, hex(oldnode), hex(newnode)))
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
                                   allnewnodes, newnode, oldnode)
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    # (bookmark, None) requests deletion of the bookmark
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obssolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the node in topology order, that might be useful for
            # some obsstore logic.
            # NOTE: the filtering and sorting might belong to createmarkers.
            isobs = unfi.obsstore.successors.__contains__
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0])
            rels = [(unfi[n], tuple(unfi[m] for m in s))
                    for n, s in sorted(replacements.items(), key=sortfunc)
                    if s or not isobs(n)]
            if rels:
                obsolete.createmarkers(repo, rels, operation=operation)
        else:
            # obsolescence markers disabled: physically strip the old nodes
            from . import repair # avoid import cycle
            tostrip = list(replacements)
            if tostrip:
                repair.delayedstrip(repo.ui, repo, tostrip, operation)
704 704
def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
    '''Add new files, remove missing ones and record renames, recursing
    into subrepos.  Returns 1 when any file was rejected or any subrepo
    reported a failure, 0 otherwise.'''
    if opts is None:
        opts = {}
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    # recurse into requested subrepositories first
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            try:
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        # only report the problem for patterns the user named explicitly
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                    badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    # an explicitly named file that was rejected trumps subrepo status
    for f in rejected:
        if f in m.files():
            return 1
    return ret
760 760
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    rejected = []
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset | set(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                repo.ui.status(_('adding %s\n') % abs)
            else:
                repo.ui.status(_('removing %s\n') % abs)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    # report failure if any explicitly requested file was rejected
    return 1 if any(f in m.files() for f in rejected) else 0
789 789
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
                                unknown=True, ignored=False, full=False)
    # st is the walk's stat-like result; falsy means the file is absent
    # from disk.  The one-letter dirstate states are bucketed as:
    #   '?' -> unknown, 'a' -> added,
    #   'r' -> forgotten (still on disk) or removed (gone from disk),
    #   anything else with no file on disk -> deleted.
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
818 818
819 819 def _findrenames(repo, matcher, added, removed, similarity):
820 820 '''Find renames from removed files to added ones.'''
821 821 renames = {}
822 822 if similarity > 0:
823 823 for old, new, score in similar.findrenames(repo, added, removed,
824 824 similarity):
825 825 if (repo.ui.verbose or not matcher.exact(old)
826 826 or not matcher.exact(new)):
827 827 repo.ui.status(_('recording removal of %s as rename to %s '
828 828 '(%d%% similar)\n') %
829 829 (matcher.rel(old), matcher.rel(new),
830 830 score * 100))
831 831 renames[new] = old
832 832 return renames
833 833
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for dst, src in renames.iteritems():
            wctx.copy(src, dst)
843 843
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    # follow the copy chain back: src may itself be a copy of origsrc
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # src was added in this working copy, so there is no committed
            # revision to record as a copy source: just add dst instead
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
862 862
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r in supported:
            continue
        if not r or not r[0].isalnum():
            # an unsupported entry that does not even look like a feature
            # name means the file itself is damaged
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        missings.append(r)
    if missings:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(sorted(missings)),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements
881 881
def writerequires(opener, requirements):
    """Write the requirement names, sorted one per line, to .hg/requires."""
    with opener('requires', 'w') as fp:
        for name in sorted(requirements):
            fp.write("%s\n" % name)
886 886
class filecachesubentry(object):
    """Tracks the stat state of a single path for the filecache decorator."""

    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        # Tri-state: True/False once determined, None while unknown.
        self._cacheable = None
        if stat:
            self.cachestat = filecachesubentry.stat(self.path)
            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()

    def refresh(self):
        """Re-stat the path so later changed() calls compare against now."""
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        """Whether the filesystem can reliably detect file replacement.

        While still undetermined, optimistically assume it can.
        """
        if self._cacheable is None:
            return True
        return self._cacheable

    def changed(self):
        """Return True when the path changed since the last (re)stat."""
        # If caching is known to be unreliable, always report a change.
        if not self.cacheable():
            return True
        newstat = filecachesubentry.stat(self.path)
        # The first successful stat tells us whether caching is reliable.
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()
        if not self._cacheable:
            return True
        if self.cachestat == newstat:
            return False
        self.cachestat = newstat
        return True

    @staticmethod
    def stat(path):
        """Return util.cachestat for path, or None when it does not exist."""
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
941 941
class filecacheentry(object):
    """Aggregate of filecachesubentry objects, one per tracked path."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(path, stat) for path in paths]

    def changed(self):
        """True if any tracked path has changed."""
        return any(entry.changed() for entry in self._entries)

    def refresh(self):
        """Re-stat every tracked path."""
        for entry in self._entries:
            entry.refresh()
958 958
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    On first access the decorated function is called and its result is
    cached, together with stat information for the tracked files
    (recorded in the owner's _filecache dict). Subsequent accesses
    compare fresh stat info against the recorded one and re-run the
    function whenever any tracked file changed.

    Mercurial only atomic-renames or appends files under .hg, so the
    cache is reliable exactly when the filesystem can tell us that a
    file has been replaced. When it cannot, we fall back to recomputing
    the value on every access (essentially propertycache behaviour).
    '''
    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of
        this function to call the appropriate join function on 'obj' (an
        instance of the class whose member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        # Decorator entry point: remember the wrapped function and the
        # attribute name it caches under.
        self.func = func
        self.name = func.__name__.encode('ascii')
        return self

    def __get__(self, obj, type=None):
        # Accessed on the class itself: yield the descriptor.
        if obj is None:
            return self
        # Fast path: a value was already computed and stored on the
        # instance (which implies a matching _filecache entry exists).
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)
        if entry is not None:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, p) for p in self.paths]
            # Stat -before- computing the value so the cache cannot lie
            # if a writer modified the files between our read and stat.
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

        obj._filecache[self.name] = entry
        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        entry = obj._filecache.get(self.name)
        if entry is None:
            # X in __dict__ implies X in _filecache, so register a
            # stat-less placeholder entry for the assigned value.
            paths = [self.join(obj, p) for p in self.paths]
            entry = filecacheentry(paths, False)
            obj._filecache[self.name] = entry
        entry.obj = value             # update cached copy
        obj.__dict__[self.name] = value  # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
1037 1037
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config("extdata", source)
    if not spec:
        raise error.Abort(_("unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith("shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                                    close_fds=util.closefds,
                                    stdout=subprocess.PIPE, cwd=repo.root)
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        # Each record is "<revspec>[ <value>]"; a record with no space
        # maps the revision to the empty string.
        for l in src:
            if " " in l:
                k, v = l.strip().split(" ", 1)
            else:
                k, v = l.strip(), ""

            k = encoding.tolocal(k)
            try:
                data[repo[k].rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass # we ignore data for nodes that don't exist locally
    finally:
        # Always reap the child process (avoids a zombie) and close the
        # data stream, even when record parsing raised.
        if proc:
            proc.communicate()
            if proc.returncode != 0:
                # not an error so 'cmd | grep' can be empty
                repo.ui.debug("extdata command '%s' %s\n"
                              % (cmd, util.explainexit(proc.returncode)[0]))
        if src:
            src.close()

    return data
1093 1093
1094 1094 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1095 1095 if lock is None:
1096 1096 raise error.LockInheritanceContractViolation(
1097 1097 'lock can only be inherited while held')
1098 1098 if environ is None:
1099 1099 environ = {}
1100 1100 with lock.inherit() as locker:
1101 1101 environ[envvar] = locker
1102 1102 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1103 1103
def wlocksub(repo, cmd, *args, **kwargs):
    """Run cmd as a subprocess that may inherit repo's wlock.

    Only valid while the wlock is held. Accepts the same arguments as
    ui.system and returns the subprocess's exit code.
    """
    return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd,
                    *args, **kwargs)
1112 1112
def gdinitconfig(ui):
    """Report whether a new repo should be created with general delta.

    Either the (experimental) format.generaldelta knob or the
    format.usegeneraldelta knob enables it.
    """
    for knob in ('generaldelta', 'usegeneraldelta'):
        if ui.configbool('format', knob):
            return True
    return False
1119 1119
def gddeltaconfig(ui):
    """Return whether incoming deltas should be optimised.

    True when the (experimental) format.generaldelta knob is set.
    """
    # experimental config: format.generaldelta
    return ui.configbool('format', 'generaldelta')
1125 1125
class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain newline characters."""
    firstlinekey = '__firstline'

    def __init__(self, vfs, path, keys=None):
        # NOTE(review): 'keys' is accepted for interface compatibility
        # but is currently unused.
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of file should
        be treated as a key-value pair or returned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                e = _("empty simplekeyvalue file")
                raise error.CorruptedState(e)
            # we don't want to include '\n' in the __firstline
            d[self.firstlinekey] = lines[0][:-1]
            del lines[0]

        try:
            # the 'if line.strip()' part prevents us from failing on empty
            # lines which only contain '\n' therefore are not skipped
            # by 'if line'
            updatedict = dict(line[:-1].split('=', 1) for line in lines
                              if line.strip())
            if self.firstlinekey in updatedict:
                e = _("%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            d.update(updatedict)
        except ValueError as e:
            raise error.CorruptedState(str(e))
        return d

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file
        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to file before
        everything else, as it is, not in a key=value form"""
        lines = []
        if firstline is not None:
            lines.append('%s\n' % firstline)

        for k, v in data.items():
            if k == self.firstlinekey:
                e = "key name '%s' is reserved" % self.firstlinekey
                raise error.ProgrammingError(e)
            if not k[0].isalpha():
                e = "keys must start with a letter in a key-value file"
                raise error.ProgrammingError(e)
            if not k.isalnum():
                e = "invalid key name in a simple key-value file"
                raise error.ProgrammingError(e)
            if '\n' in v:
                e = "invalid value in a simple key-value file"
                raise error.ProgrammingError(e)
            lines.append("%s=%s\n" % (k, v))
        with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
            fp.write(''.join(lines))
1194 1194
# Transaction-name prefixes for which a post-close summary callback
# should report the number of obsoleted changesets.
_reportobsoletedsource = [
    'debugobsolete',
    'pull',
    'push',
    'serve',
    'unbundle',
]
1202 1202
def registersummarycallback(repo, otr, txnname=''):
    """register a callback to issue a summary after the transaction is closed

    The span in SOURCE mixed the pre- and post-refactor versions (diff
    residue); this is the coherent refactored form, with the report
    registration factored into a reusable decorator.
    """
    def txmatch(sources):
        # True when the transaction name starts with any known source.
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # Hold the repository through a weak reference so the callback
        # does not keep it alive beyond the transaction's lifetime.
        reporef = weakref.ref(repo)
        def wrapped(tr):
            repo = reporef()
            func(repo, tr)
        # Category names are numbered so reports run in registration order.
        newcat = '%2i-txnreport' % len(categories)
        otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    if txmatch(_reportobsoletedsource):
        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            if obsoleted:
                repo.ui.status(_('obsoleted %i changesets\n')
                               % len(obsoleted))
General Comments 0
You need to be logged in to leave comments. Login now