scmutil: fix a repr in an error message on Python 3...
Augie Fackler
r36587:bb5f5c1c default
@@ -1,1418 +1,1419 @@
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import hashlib
13 13 import os
14 14 import re
15 15 import socket
16 16 import subprocess
17 17 import weakref
18 18
19 19 from .i18n import _
20 20 from .node import (
21 21 hex,
22 22 nullid,
23 23 short,
24 24 wdirid,
25 25 wdirrev,
26 26 )
27 27
28 28 from . import (
29 29 encoding,
30 30 error,
31 31 match as matchmod,
32 32 obsolete,
33 33 obsutil,
34 34 pathutil,
35 35 phases,
36 36 pycompat,
37 37 revsetlang,
38 38 similar,
39 39 url,
40 40 util,
41 41 vfs,
42 42 )
43 43
44 44 if pycompat.iswindows:
45 45 from . import scmwindows as scmplatform
46 46 else:
47 47 from . import scmposix as scmplatform
48 48
49 49 termsize = scmplatform.termsize
50 50
51 51 class status(tuple):
52 52 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
53 53 and 'ignored' properties are only relevant to the working copy.
54 54 '''
55 55
56 56 __slots__ = ()
57 57
58 58 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
59 59 clean):
60 60 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
61 61 ignored, clean))
62 62
63 63 @property
64 64 def modified(self):
65 65 '''files that have been modified'''
66 66 return self[0]
67 67
68 68 @property
69 69 def added(self):
70 70 '''files that have been added'''
71 71 return self[1]
72 72
73 73 @property
74 74 def removed(self):
75 75 '''files that have been removed'''
76 76 return self[2]
77 77
78 78 @property
79 79 def deleted(self):
80 80 '''files that are in the dirstate, but have been deleted from the
81 81 working copy (aka "missing")
82 82 '''
83 83 return self[3]
84 84
85 85 @property
86 86 def unknown(self):
87 87 '''files not in the dirstate that are not ignored'''
88 88 return self[4]
89 89
90 90 @property
91 91 def ignored(self):
92 92 '''files not in the dirstate that are ignored (by _dirignore())'''
93 93 return self[5]
94 94
95 95 @property
96 96 def clean(self):
97 97 '''files that have not been modified'''
98 98 return self[6]
99 99
100 100 def __repr__(self, *args, **kwargs):
101 101 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
102 102 'unknown=%r, ignored=%r, clean=%r>') % self)
103 103
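# --- editor's note: a minimal usage sketch for the status class above
# (doctest-style; the file names are invented, nothing here is from the diff):
#
# >>> st = status(['a.txt'], [], [], [], ['new.txt'], [], ['c.txt'])
# >>> st.modified
# ['a.txt']
# >>> st.unknown
# ['new.txt']
# >>> m, a, r, d, u, i, c = st    # still unpacks like a plain 7-tuple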
104 104 def itersubrepos(ctx1, ctx2):
105 105 """find subrepos in ctx1 or ctx2"""
106 106 # Create a (subpath, ctx) mapping where we prefer subpaths from
107 107 # ctx1. The subpaths from ctx2 are important when the .hgsub file
108 108 # has been modified (in ctx2) but not yet committed (in ctx1).
109 109 subpaths = dict.fromkeys(ctx2.substate, ctx2)
110 110 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
111 111
112 112 missing = set()
113 113
114 114 for subpath in ctx2.substate:
115 115 if subpath not in ctx1.substate:
116 116 del subpaths[subpath]
117 117 missing.add(subpath)
118 118
119 119 for subpath, ctx in sorted(subpaths.iteritems()):
120 120 yield subpath, ctx.sub(subpath)
121 121
122 122 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
123 123 # status and diff will have an accurate result when it does
124 124 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
125 125 # against itself.
126 126 for subpath in missing:
127 127 yield subpath, ctx2.nullsub(subpath, ctx1)
128 128
129 129 def nochangesfound(ui, repo, excluded=None):
130 130 '''Report no changes for push/pull; excluded is None or a list of
131 131 nodes excluded from the push/pull.
132 132 '''
133 133 secretlist = []
134 134 if excluded:
135 135 for n in excluded:
136 136 ctx = repo[n]
137 137 if ctx.phase() >= phases.secret and not ctx.extinct():
138 138 secretlist.append(n)
139 139
140 140 if secretlist:
141 141 ui.status(_("no changes found (ignored %d secret changesets)\n")
142 142 % len(secretlist))
143 143 else:
144 144 ui.status(_("no changes found\n"))
145 145
146 146 def callcatch(ui, func):
147 147 """call func() with global exception handling
148 148
149 149 return func() if no exception happens. otherwise do some error handling
150 150 and return an exit code accordingly. does not handle all exceptions.
151 151 """
152 152 try:
153 153 try:
154 154 return func()
155 155 except: # re-raises
156 156 ui.traceback()
157 157 raise
158 158 # Global exception handling, alphabetically
159 159 # Mercurial-specific first, followed by built-in and library exceptions
160 160 except error.LockHeld as inst:
161 161 if inst.errno == errno.ETIMEDOUT:
162 162 reason = _('timed out waiting for lock held by %r') % inst.locker
163 163 else:
164 164 reason = _('lock held by %r') % inst.locker
165 165 ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason))
166 166 if not inst.locker:
167 167 ui.warn(_("(lock might be very busy)\n"))
168 168 except error.LockUnavailable as inst:
169 169 ui.warn(_("abort: could not lock %s: %s\n") %
170 170 (inst.desc or inst.filename,
171 171 encoding.strtolocal(inst.strerror)))
172 172 except error.OutOfBandError as inst:
173 173 if inst.args:
174 174 msg = _("abort: remote error:\n")
175 175 else:
176 176 msg = _("abort: remote error\n")
177 177 ui.warn(msg)
178 178 if inst.args:
179 179 ui.warn(''.join(inst.args))
180 180 if inst.hint:
181 181 ui.warn('(%s)\n' % inst.hint)
182 182 except error.RepoError as inst:
183 183 ui.warn(_("abort: %s!\n") % inst)
184 184 if inst.hint:
185 185 ui.warn(_("(%s)\n") % inst.hint)
186 186 except error.ResponseError as inst:
187 187 ui.warn(_("abort: %s") % inst.args[0])
188 188 if not isinstance(inst.args[1], basestring):
189 189 ui.warn(" %r\n" % (inst.args[1],))
190 190 elif not inst.args[1]:
191 191 ui.warn(_(" empty string\n"))
192 192 else:
193 193 ui.warn("\n%r\n" % util.ellipsis(inst.args[1]))
194 194 except error.CensoredNodeError as inst:
195 195 ui.warn(_("abort: file censored %s!\n") % inst)
196 196 except error.RevlogError as inst:
197 197 ui.warn(_("abort: %s!\n") % inst)
198 198 except error.InterventionRequired as inst:
199 199 ui.warn("%s\n" % inst)
200 200 if inst.hint:
201 201 ui.warn(_("(%s)\n") % inst.hint)
202 202 return 1
203 203 except error.WdirUnsupported:
204 204 ui.warn(_("abort: working directory revision cannot be specified\n"))
205 205 except error.Abort as inst:
206 206 ui.warn(_("abort: %s\n") % inst)
207 207 if inst.hint:
208 208 ui.warn(_("(%s)\n") % inst.hint)
209 209 except ImportError as inst:
210 210 ui.warn(_("abort: %s!\n") % inst)
211 211 m = util.forcebytestr(inst).split()[-1]
212 212 if m in "mpatch bdiff".split():
213 213 ui.warn(_("(did you forget to compile extensions?)\n"))
214 214 elif m in "zlib".split():
215 215 ui.warn(_("(is your Python install correct?)\n"))
216 216 except IOError as inst:
217 217 if util.safehasattr(inst, "code"):
218 218 ui.warn(_("abort: %s\n") % util.forcebytestr(inst))
219 219 elif util.safehasattr(inst, "reason"):
220 220 try: # usually it is in the form (errno, strerror)
221 221 reason = inst.reason.args[1]
222 222 except (AttributeError, IndexError):
223 223 # it might be anything, for example a string
224 224 reason = inst.reason
225 225 if isinstance(reason, unicode):
226 226 # SSLError of Python 2.7.9 contains a unicode
227 227 reason = encoding.unitolocal(reason)
228 228 ui.warn(_("abort: error: %s\n") % reason)
229 229 elif (util.safehasattr(inst, "args")
230 230 and inst.args and inst.args[0] == errno.EPIPE):
231 231 pass
232 232 elif getattr(inst, "strerror", None):
233 233 if getattr(inst, "filename", None):
234 234 ui.warn(_("abort: %s: %s\n") % (
235 235 encoding.strtolocal(inst.strerror), inst.filename))
236 236 else:
237 237 ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
238 238 else:
239 239 raise
240 240 except OSError as inst:
241 241 if getattr(inst, "filename", None) is not None:
242 242 ui.warn(_("abort: %s: '%s'\n") % (
243 243 encoding.strtolocal(inst.strerror), inst.filename))
244 244 else:
245 245 ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
246 246 except MemoryError:
247 247 ui.warn(_("abort: out of memory\n"))
248 248 except SystemExit as inst:
249 249 # Commands shouldn't sys.exit directly, but give a return code.
250 250 # Just in case, catch this and pass the exit code to the caller.
251 251 return inst.code
252 252 except socket.error as inst:
253 253 ui.warn(_("abort: %s\n") % inst.args[-1])
254 254
255 255 return -1
256 256
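# --- editor's note: a hedged usage sketch for callcatch() above; 'ui' is
# assumed to be a mercurial.ui.ui instance and the failing callable is
# invented:
#
# >>> def doomed():
# ...     raise error.Abort('demo failure')
# >>> callcatch(ui, doomed)    # prints "abort: demo failure"
# -1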
257 257 def checknewlabel(repo, lbl, kind):
258 258 # Do not use the "kind" parameter in ui output.
259 259 # It makes strings difficult to translate.
260 260 if lbl in ['tip', '.', 'null']:
261 261 raise error.Abort(_("the name '%s' is reserved") % lbl)
262 262 for c in (':', '\0', '\n', '\r'):
263 263 if c in lbl:
264 raise error.Abort(_("%r cannot be used in a name") % c)
264 raise error.Abort(
265 _("%r cannot be used in a name") % pycompat.bytestr(c))
265 266 try:
266 267 int(lbl)
267 268 raise error.Abort(_("cannot use an integer as a name"))
268 269 except ValueError:
269 270 pass
270 271 if lbl.strip() != lbl:
271 272 raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
272 273
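# --- editor's note on the hunk above, a hedged sketch (not from the diff):
# under Mercurial's py3 source transformer the label characters are bytes,
# and %r on plain bytes renders with a b'' prefix inside the (bytes) error
# message. pycompat.bytestr appears to be a bytes subclass whose repr drops
# that prefix, keeping the Python 2 look:
#
# >>> b"%r cannot be used in a name" % b':'                  # plain bytes, py3
# b"b':' cannot be used in a name"
# >>> b"%r cannot be used in a name" % pycompat.bytestr(b':')
# b"':' cannot be used in a name"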
273 274 def checkfilename(f):
274 275 '''Check that the filename f is an acceptable filename for a tracked file'''
275 276 if '\r' in f or '\n' in f:
276 277 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
277 278
278 279 def checkportable(ui, f):
279 280 '''Check if filename f is portable and warn or abort depending on config'''
280 281 checkfilename(f)
281 282 abort, warn = checkportabilityalert(ui)
282 283 if abort or warn:
283 284 msg = util.checkwinfilename(f)
284 285 if msg:
285 286 msg = "%s: %s" % (msg, util.shellquote(f))
286 287 if abort:
287 288 raise error.Abort(msg)
288 289 ui.warn(_("warning: %s\n") % msg)
289 290
290 291 def checkportabilityalert(ui):
291 292 '''check if the user's config requests nothing, a warning, or abort for
292 293 non-portable filenames'''
293 294 val = ui.config('ui', 'portablefilenames')
294 295 lval = val.lower()
295 296 bval = util.parsebool(val)
296 297 abort = pycompat.iswindows or lval == 'abort'
297 298 warn = bval or lval == 'warn'
298 299 if bval is None and not (warn or abort or lval == 'ignore'):
299 300 raise error.ConfigError(
300 301 _("ui.portablefilenames value is invalid ('%s')") % val)
301 302 return abort, warn
302 303
303 304 class casecollisionauditor(object):
304 305 def __init__(self, ui, abort, dirstate):
305 306 self._ui = ui
306 307 self._abort = abort
307 308 allfiles = '\0'.join(dirstate._map)
308 309 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
309 310 self._dirstate = dirstate
310 311 # The purpose of _newfiles is so that we don't complain about
311 312 # case collisions if someone were to call this object with the
312 313 # same filename twice.
313 314 self._newfiles = set()
314 315
315 316 def __call__(self, f):
316 317 if f in self._newfiles:
317 318 return
318 319 fl = encoding.lower(f)
319 320 if fl in self._loweredfiles and f not in self._dirstate:
320 321 msg = _('possible case-folding collision for %s') % f
321 322 if self._abort:
322 323 raise error.Abort(msg)
323 324 self._ui.warn(_("warning: %s\n") % msg)
324 325 self._loweredfiles.add(fl)
325 326 self._newfiles.add(f)
326 327
327 328 def filteredhash(repo, maxrev):
328 329 """build hash of filtered revisions in the current repoview.
329 330
330 331 Multiple caches perform up-to-date validation by checking that the
331 332 tiprev and tipnode stored in the cache file match the current repository.
332 333 However, this is not sufficient for validating repoviews because the set
333 334 of revisions in the view may change without the repository tiprev and
334 335 tipnode changing.
335 336
336 337 This function hashes all the revs filtered from the view and returns
337 338 that SHA-1 digest.
338 339 """
339 340 cl = repo.changelog
340 341 if not cl.filteredrevs:
341 342 return None
342 343 key = None
343 344 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
344 345 if revs:
345 346 s = hashlib.sha1()
346 347 for rev in revs:
347 348 s.update('%d;' % rev)
348 349 key = s.digest()
349 350 return key
350 351
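# --- editor's note: the key above is just SHA-1 over "<rev>;" for each
# filtered rev <= maxrev. A standalone sketch of the same scheme (hashlib is
# already imported at the top of this module; the revs are invented):
#
# >>> s = hashlib.sha1()
# >>> for rev in sorted({2, 5, 9}):
# ...     s.update(b'%d;' % rev)        # bytes %-formatting needs py3.5+
# >>> key = s.digest()                  # 20-byte digest used as cache key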
351 352 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
352 353 '''yield every hg repository under path, always recursively.
353 354 The recurse flag will only control recursion into repo working dirs'''
354 355 def errhandler(err):
355 356 if err.filename == path:
356 357 raise err
357 358 samestat = getattr(os.path, 'samestat', None)
358 359 if followsym and samestat is not None:
359 360 def adddir(dirlst, dirname):
360 361 dirstat = os.stat(dirname)
361 362 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
362 363 if not match:
363 364 dirlst.append(dirstat)
364 365 return not match
365 366 else:
366 367 followsym = False
367 368
368 369 if (seen_dirs is None) and followsym:
369 370 seen_dirs = []
370 371 adddir(seen_dirs, path)
371 372 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
372 373 dirs.sort()
373 374 if '.hg' in dirs:
374 375 yield root # found a repository
375 376 qroot = os.path.join(root, '.hg', 'patches')
376 377 if os.path.isdir(os.path.join(qroot, '.hg')):
377 378 yield qroot # we have a patch queue repo here
378 379 if recurse:
379 380 # avoid recursing inside the .hg directory
380 381 dirs.remove('.hg')
381 382 else:
382 383 dirs[:] = [] # don't descend further
383 384 elif followsym:
384 385 newdirs = []
385 386 for d in dirs:
386 387 fname = os.path.join(root, d)
387 388 if adddir(seen_dirs, fname):
388 389 if os.path.islink(fname):
389 390 for hgname in walkrepos(fname, True, seen_dirs):
390 391 yield hgname
391 392 else:
392 393 newdirs.append(d)
393 394 dirs[:] = newdirs
394 395
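# --- editor's note: hypothetical usage of walkrepos() above; the path is
# invented:
#
# >>> for repodir in walkrepos('/home/me/src', followsym=True):
# ...     print(repodir)    # each directory that contains a .hg subdir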
395 396 def binnode(ctx):
396 397 """Return binary node id for a given basectx"""
397 398 node = ctx.node()
398 399 if node is None:
399 400 return wdirid
400 401 return node
401 402
402 403 def intrev(ctx):
403 404 """Return integer for a given basectx that can be used in comparison or
404 405 arithmetic operation"""
405 406 rev = ctx.rev()
406 407 if rev is None:
407 408 return wdirrev
408 409 return rev
409 410
410 411 def formatchangeid(ctx):
411 412 """Format changectx as '{rev}:{node|formatnode}', which is the default
412 413 template provided by logcmdutil.changesettemplater"""
413 414 repo = ctx.repo()
414 415 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
415 416
416 417 def formatrevnode(ui, rev, node):
417 418 """Format given revision and node depending on the current verbosity"""
418 419 if ui.debugflag:
419 420 hexfunc = hex
420 421 else:
421 422 hexfunc = short
422 423 return '%d:%s' % (rev, hexfunc(node))
423 424
424 425 def revsingle(repo, revspec, default='.', localalias=None):
425 426 if not revspec and revspec != 0:
426 427 return repo[default]
427 428
428 429 l = revrange(repo, [revspec], localalias=localalias)
429 430 if not l:
430 431 raise error.Abort(_('empty revision set'))
431 432 return repo[l.last()]
432 433
433 434 def _pairspec(revspec):
434 435 tree = revsetlang.parse(revspec)
435 436 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
436 437
437 438 def revpair(repo, revs):
438 439 if not revs:
439 440 return repo.dirstate.p1(), None
440 441
441 442 l = revrange(repo, revs)
442 443
443 444 if not l:
444 445 first = second = None
445 446 elif l.isascending():
446 447 first = l.min()
447 448 second = l.max()
448 449 elif l.isdescending():
449 450 first = l.max()
450 451 second = l.min()
451 452 else:
452 453 first = l.first()
453 454 second = l.last()
454 455
455 456 if first is None:
456 457 raise error.Abort(_('empty revision range'))
457 458 if (first == second and len(revs) >= 2
458 459 and not all(revrange(repo, [r]) for r in revs)):
459 460 raise error.Abort(_('empty revision on one side of range'))
460 461
461 462 # if top-level is range expression, the result must always be a pair
462 463 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
463 464 return repo.lookup(first), None
464 465
465 466 return repo.lookup(first), repo.lookup(second)
466 467
467 468 def revrange(repo, specs, localalias=None):
468 469 """Execute 1 to many revsets and return the union.
469 470
470 471 This is the preferred mechanism for executing revsets using user-specified
471 472 config options, such as revset aliases.
472 473
473 474 The revsets specified by ``specs`` will be executed via a chained ``OR``
474 475 expression. If ``specs`` is empty, an empty result is returned.
475 476
476 477 ``specs`` can contain integers, in which case they are assumed to be
477 478 revision numbers.
478 479
479 480 It is assumed the revsets are already formatted. If you have arguments
480 481 that need to be expanded in the revset, call ``revsetlang.formatspec()``
481 482 and pass the result as an element of ``specs``.
482 483
483 484 Specifying a single revset is allowed.
484 485
485 486 Returns a ``revset.abstractsmartset`` which is a list-like interface over
486 487 integer revisions.
487 488 """
488 489 allspecs = []
489 490 for spec in specs:
490 491 if isinstance(spec, int):
491 492 spec = revsetlang.formatspec('rev(%d)', spec)
492 493 allspecs.append(spec)
493 494 return repo.anyrevs(allspecs, user=True, localalias=localalias)
494 495
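# --- editor's note: hypothetical calls matching the revrange() contract
# documented above ('repo' is assumed to be an open localrepository):
#
# >>> revrange(repo, ['heads(default)', 'tip'])   # union of the two revsets
# >>> revrange(repo, [0, 'tip'])                  # the int becomes rev(0)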
495 496 def meaningfulparents(repo, ctx):
496 497 """Return list of meaningful (or all if debug) parentrevs for rev.
497 498
498 499 For merges (two non-nullrev revisions) both parents are meaningful.
499 500 Otherwise the first parent revision is considered meaningful if it
500 501 is not the preceding revision.
501 502 """
502 503 parents = ctx.parents()
503 504 if len(parents) > 1:
504 505 return parents
505 506 if repo.ui.debugflag:
506 507 return [parents[0], repo['null']]
507 508 if parents[0].rev() >= intrev(ctx) - 1:
508 509 return []
509 510 return parents
510 511
511 512 def expandpats(pats):
512 513 '''Expand bare globs when running on windows.
513 514 On posix we assume it has already been done by sh.'''
514 515 if not util.expandglobs:
515 516 return list(pats)
516 517 ret = []
517 518 for kindpat in pats:
518 519 kind, pat = matchmod._patsplit(kindpat, None)
519 520 if kind is None:
520 521 try:
521 522 globbed = glob.glob(pat)
522 523 except re.error:
523 524 globbed = [pat]
524 525 if globbed:
525 526 ret.extend(globbed)
526 527 continue
527 528 ret.append(kindpat)
528 529 return ret
529 530
530 531 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
531 532 badfn=None):
532 533 '''Return a matcher and the patterns that were used.
533 534 The matcher will warn about bad matches, unless an alternate badfn callback
534 535 is provided.'''
535 536 if pats == ("",):
536 537 pats = []
537 538 if opts is None:
538 539 opts = {}
539 540 if not globbed and default == 'relpath':
540 541 pats = expandpats(pats or [])
541 542
542 543 def bad(f, msg):
543 544 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
544 545
545 546 if badfn is None:
546 547 badfn = bad
547 548
548 549 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
549 550 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
550 551
551 552 if m.always():
552 553 pats = []
553 554 return m, pats
554 555
555 556 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
556 557 badfn=None):
557 558 '''Return a matcher that will warn about bad matches.'''
558 559 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
559 560
560 561 def matchall(repo):
561 562 '''Return a matcher that will efficiently match everything.'''
562 563 return matchmod.always(repo.root, repo.getcwd())
563 564
564 565 def matchfiles(repo, files, badfn=None):
565 566 '''Return a matcher that will efficiently match exactly these files.'''
566 567 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
567 568
568 569 def parsefollowlinespattern(repo, rev, pat, msg):
569 570 """Return a file name from `pat` pattern suitable for usage in followlines
570 571 logic.
571 572 """
572 573 if not matchmod.patkind(pat):
573 574 return pathutil.canonpath(repo.root, repo.getcwd(), pat)
574 575 else:
575 576 ctx = repo[rev]
576 577 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
577 578 files = [f for f in ctx if m(f)]
578 579 if len(files) != 1:
579 580 raise error.ParseError(msg)
580 581 return files[0]
581 582
582 583 def origpath(ui, repo, filepath):
583 584 '''customize where .orig files are created
584 585
585 586 Fetch user defined path from config file: [ui] origbackuppath = <path>
586 587 Fall back to default (filepath with .orig suffix) if not specified
587 588 '''
588 589 origbackuppath = ui.config('ui', 'origbackuppath')
589 590 if not origbackuppath:
590 591 return filepath + ".orig"
591 592
592 593 # Convert filepath from an absolute path into a path inside the repo.
593 594 filepathfromroot = util.normpath(os.path.relpath(filepath,
594 595 start=repo.root))
595 596
596 597 origvfs = vfs.vfs(repo.wjoin(origbackuppath))
597 598 origbackupdir = origvfs.dirname(filepathfromroot)
598 599 if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
599 600 ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))
600 601
601 602 # Remove any files that conflict with the backup file's path
602 603 for f in reversed(list(util.finddirs(filepathfromroot))):
603 604 if origvfs.isfileorlink(f):
604 605 ui.note(_('removing conflicting file: %s\n')
605 606 % origvfs.join(f))
606 607 origvfs.unlink(f)
607 608 break
608 609
609 610 origvfs.makedirs(origbackupdir)
610 611
611 612 if origvfs.isdir(filepathfromroot) and not origvfs.islink(filepathfromroot):
612 613 ui.note(_('removing conflicting directory: %s\n')
613 614 % origvfs.join(filepathfromroot))
614 615 origvfs.rmtree(filepathfromroot, forcibly=True)
615 616
616 617 return origvfs.join(filepathfromroot)
617 618
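# --- editor's note: a behaviour sketch for origpath() above, with invented
# paths:
#
#   [ui] origbackuppath unset:
#       /repo/dir/f.txt  ->  /repo/dir/f.txt.orig
#   [ui] origbackuppath=.hg/origbackups:
#       /repo/dir/f.txt  ->  <repo>/.hg/origbackups/dir/f.txt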
618 619 class _containsnode(object):
619 620 """proxy __contains__(node) to container.__contains__ which accepts revs"""
620 621
621 622 def __init__(self, repo, revcontainer):
622 623 self._torev = repo.changelog.rev
623 624 self._revcontains = revcontainer.__contains__
624 625
625 626 def __contains__(self, node):
626 627 return self._revcontains(self._torev(node))
627 628
628 629 def cleanupnodes(repo, replacements, operation, moves=None, metadata=None):
629 630 """do common cleanups when old nodes are replaced by new nodes
630 631
631 632 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
632 633 (we might also want to move working directory parent in the future)
633 634
634 635 By default, bookmark moves are calculated automatically from 'replacements',
635 636 but 'moves' can be used to override that. Also, 'moves' may include
636 637 additional bookmark moves that should not have associated obsmarkers.
637 638
638 639 replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
639 640 have replacements. operation is a string, like "rebase".
640 641
641 642 metadata is a dictionary containing metadata to be stored in the obsmarker if
642 643 obsolescence is enabled.
643 644 """
644 645 if not replacements and not moves:
645 646 return
646 647
647 648 # translate mapping's other forms
648 649 if not util.safehasattr(replacements, 'items'):
649 650 replacements = {n: () for n in replacements}
650 651
651 652 # Calculate bookmark movements
652 653 if moves is None:
653 654 moves = {}
654 655 # Unfiltered repo is needed since nodes in replacements might be hidden.
655 656 unfi = repo.unfiltered()
656 657 for oldnode, newnodes in replacements.items():
657 658 if oldnode in moves:
658 659 continue
659 660 if len(newnodes) > 1:
660 661 # usually a split, take the one with biggest rev number
661 662 newnode = next(unfi.set('max(%ln)', newnodes)).node()
662 663 elif len(newnodes) == 0:
663 664 # move bookmark backwards
664 665 roots = list(unfi.set('max((::%n) - %ln)', oldnode,
665 666 list(replacements)))
666 667 if roots:
667 668 newnode = roots[0].node()
668 669 else:
669 670 newnode = nullid
670 671 else:
671 672 newnode = newnodes[0]
672 673 moves[oldnode] = newnode
673 674
674 675 with repo.transaction('cleanup') as tr:
675 676 # Move bookmarks
676 677 bmarks = repo._bookmarks
677 678 bmarkchanges = []
678 679 allnewnodes = [n for ns in replacements.values() for n in ns]
679 680 for oldnode, newnode in moves.items():
680 681 oldbmarks = repo.nodebookmarks(oldnode)
681 682 if not oldbmarks:
682 683 continue
683 684 from . import bookmarks # avoid import cycle
684 685 repo.ui.debug('moving bookmarks %r from %s to %s\n' %
685 686 (oldbmarks, hex(oldnode), hex(newnode)))
686 687 # Delete divergent bookmarks being parents of related newnodes
687 688 deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
688 689 allnewnodes, newnode, oldnode)
689 690 deletenodes = _containsnode(repo, deleterevs)
690 691 for name in oldbmarks:
691 692 bmarkchanges.append((name, newnode))
692 693 for b in bookmarks.divergent2delete(repo, deletenodes, name):
693 694 bmarkchanges.append((b, None))
694 695
695 696 if bmarkchanges:
696 697 bmarks.applychanges(repo, tr, bmarkchanges)
697 698
698 699 # Obsolete or strip nodes
699 700 if obsolete.isenabled(repo, obsolete.createmarkersopt):
700 701 # If a node is already obsoleted, and we want to obsolete it
701 702 # without a successor, skip that obsolete request since it's
702 703 # unnecessary. That's the "if s or not isobs(n)" check below.
703 704 # Also sort the nodes in topological order; that might be useful for
704 705 # some obsstore logic.
705 706 # NOTE: the filtering and sorting might belong to createmarkers.
706 707 isobs = unfi.obsstore.successors.__contains__
707 708 torev = unfi.changelog.rev
708 709 sortfunc = lambda ns: torev(ns[0])
709 710 rels = [(unfi[n], tuple(unfi[m] for m in s))
710 711 for n, s in sorted(replacements.items(), key=sortfunc)
711 712 if s or not isobs(n)]
712 713 if rels:
713 714 obsolete.createmarkers(repo, rels, operation=operation,
714 715 metadata=metadata)
715 716 else:
716 717 from . import repair # avoid import cycle
717 718 tostrip = list(replacements)
718 719 if tostrip:
719 720 repair.delayedstrip(repo.ui, repo, tostrip, operation)
720 721
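# --- editor's note: hypothetical call shapes for cleanupnodes() above,
# matching its docstring (old/new are invented node ids):
#
# >>> cleanupnodes(repo, {old: (new,)}, 'rebase')  # old superseded by new
# >>> cleanupnodes(repo, {old: ()}, 'prune')       # no successor
# >>> cleanupnodes(repo, [old1, old2], 'strip')    # bare iterable form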
721 722 def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
722 723 if opts is None:
723 724 opts = {}
724 725 m = matcher
725 726 if dry_run is None:
726 727 dry_run = opts.get('dry_run')
727 728 if similarity is None:
728 729 similarity = float(opts.get('similarity') or 0)
729 730
730 731 ret = 0
731 732 join = lambda f: os.path.join(prefix, f)
732 733
733 734 wctx = repo[None]
734 735 for subpath in sorted(wctx.substate):
735 736 submatch = matchmod.subdirmatcher(subpath, m)
736 737 if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
737 738 sub = wctx.sub(subpath)
738 739 try:
739 740 if sub.addremove(submatch, prefix, opts, dry_run, similarity):
740 741 ret = 1
741 742 except error.LookupError:
742 743 repo.ui.status(_("skipping missing subrepository: %s\n")
743 744 % join(subpath))
744 745
745 746 rejected = []
746 747 def badfn(f, msg):
747 748 if f in m.files():
748 749 m.bad(f, msg)
749 750 rejected.append(f)
750 751
751 752 badmatch = matchmod.badmatch(m, badfn)
752 753 added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
753 754 badmatch)
754 755
755 756 unknownset = set(unknown + forgotten)
756 757 toprint = unknownset.copy()
757 758 toprint.update(deleted)
758 759 for abs in sorted(toprint):
759 760 if repo.ui.verbose or not m.exact(abs):
760 761 if abs in unknownset:
761 762 status = _('adding %s\n') % m.uipath(abs)
762 763 else:
763 764 status = _('removing %s\n') % m.uipath(abs)
764 765 repo.ui.status(status)
765 766
766 767 renames = _findrenames(repo, m, added + unknown, removed + deleted,
767 768 similarity)
768 769
769 770 if not dry_run:
770 771 _markchanges(repo, unknown + forgotten, deleted, renames)
771 772
772 773 for f in rejected:
773 774 if f in m.files():
774 775 return 1
775 776 return ret
776 777
777 778 def marktouched(repo, files, similarity=0.0):
778 779 '''Assert that files have somehow been operated upon. files are relative to
779 780 the repo root.'''
780 781 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
781 782 rejected = []
782 783
783 784 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
784 785
785 786 if repo.ui.verbose:
786 787 unknownset = set(unknown + forgotten)
787 788 toprint = unknownset.copy()
788 789 toprint.update(deleted)
789 790 for abs in sorted(toprint):
790 791 if abs in unknownset:
791 792 status = _('adding %s\n') % abs
792 793 else:
793 794 status = _('removing %s\n') % abs
794 795 repo.ui.status(status)
795 796
796 797 renames = _findrenames(repo, m, added + unknown, removed + deleted,
797 798 similarity)
798 799
799 800 _markchanges(repo, unknown + forgotten, deleted, renames)
800 801
801 802 for f in rejected:
802 803 if f in m.files():
803 804 return 1
804 805 return 0
805 806
806 807 def _interestingfiles(repo, matcher):
807 808 '''Walk dirstate with matcher, looking for files that addremove would care
808 809 about.
809 810
810 811 This is different from dirstate.status because it doesn't care about
811 812 whether files are modified or clean.'''
812 813 added, unknown, deleted, removed, forgotten = [], [], [], [], []
813 814 audit_path = pathutil.pathauditor(repo.root, cached=True)
814 815
815 816 ctx = repo[None]
816 817 dirstate = repo.dirstate
817 818 walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
818 819 unknown=True, ignored=False, full=False)
819 820 for abs, st in walkresults.iteritems():
820 821 dstate = dirstate[abs]
821 822 if dstate == '?' and audit_path.check(abs):
822 823 unknown.append(abs)
823 824 elif dstate != 'r' and not st:
824 825 deleted.append(abs)
825 826 elif dstate == 'r' and st:
826 827 forgotten.append(abs)
827 828 # for finding renames
828 829 elif dstate == 'r' and not st:
829 830 removed.append(abs)
830 831 elif dstate == 'a':
831 832 added.append(abs)
832 833
833 834 return added, unknown, deleted, removed, forgotten
834 835
835 836 def _findrenames(repo, matcher, added, removed, similarity):
836 837 '''Find renames from removed files to added ones.'''
837 838 renames = {}
838 839 if similarity > 0:
839 840 for old, new, score in similar.findrenames(repo, added, removed,
840 841 similarity):
841 842 if (repo.ui.verbose or not matcher.exact(old)
842 843 or not matcher.exact(new)):
843 844 repo.ui.status(_('recording removal of %s as rename to %s '
844 845 '(%d%% similar)\n') %
845 846 (matcher.rel(old), matcher.rel(new),
846 847 score * 100))
847 848 renames[new] = old
848 849 return renames
849 850
850 851 def _markchanges(repo, unknown, deleted, renames):
851 852 '''Marks the files in unknown as added, the files in deleted as removed,
852 853 and the files in renames as copied.'''
853 854 wctx = repo[None]
854 855 with repo.wlock():
855 856 wctx.forget(deleted)
856 857 wctx.add(unknown)
857 858 for new, old in renames.iteritems():
858 859 wctx.copy(old, new)
859 860
860 861 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
861 862 """Update the dirstate to reflect the intent of copying src to dst. For
862 863 different reasons it might not end with dst being marked as copied from src.
863 864 """
864 865 origsrc = repo.dirstate.copied(src) or src
865 866 if dst == origsrc: # copying back a copy?
866 867 if repo.dirstate[dst] not in 'mn' and not dryrun:
867 868 repo.dirstate.normallookup(dst)
868 869 else:
869 870 if repo.dirstate[origsrc] == 'a' and origsrc == src:
870 871 if not ui.quiet:
871 872 ui.warn(_("%s has not been committed yet, so no copy "
872 873 "data will be stored for %s.\n")
873 874 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
874 875 if repo.dirstate[dst] in '?r' and not dryrun:
875 876 wctx.add([dst])
876 877 elif not dryrun:
877 878 wctx.copy(origsrc, dst)
878 879
879 880 def readrequires(opener, supported):
880 881 '''Reads and parses .hg/requires and checks if all entries found
881 882 are in the list of supported features.'''
882 883 requirements = set(opener.read("requires").splitlines())
883 884 missings = []
884 885 for r in requirements:
885 886 if r not in supported:
886 887 if not r or not r[0:1].isalnum():
887 888 raise error.RequirementError(_(".hg/requires file is corrupt"))
888 889 missings.append(r)
889 890 missings.sort()
890 891 if missings:
891 892 raise error.RequirementError(
892 893 _("repository requires features unknown to this Mercurial: %s")
893 894 % " ".join(missings),
894 895 hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
895 896 " for more information"))
896 897 return requirements
897 898
898 899 def writerequires(opener, requirements):
899 900 with opener('requires', 'w') as fp:
900 901 for r in sorted(requirements):
901 902 fp.write("%s\n" % r)
902 903
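# --- editor's note: an illustrative .hg/requires file as read and written
# above, one feature name per line (actual entries vary by repo format):
#
#   dotencode
#   fncache
#   generaldelta
#   revlogv1
#   store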
903 904 class filecachesubentry(object):
904 905 def __init__(self, path, stat):
905 906 self.path = path
906 907 self.cachestat = None
907 908 self._cacheable = None
908 909
909 910 if stat:
910 911 self.cachestat = filecachesubentry.stat(self.path)
911 912
912 913 if self.cachestat:
913 914 self._cacheable = self.cachestat.cacheable()
914 915 else:
915 916 # None means we don't know yet
916 917 self._cacheable = None
917 918
918 919 def refresh(self):
919 920 if self.cacheable():
920 921 self.cachestat = filecachesubentry.stat(self.path)
921 922
922 923 def cacheable(self):
923 924 if self._cacheable is not None:
924 925 return self._cacheable
925 926
926 927 # we don't know yet, assume it is for now
927 928 return True
928 929
929 930 def changed(self):
930 931 # no point in going further if we can't cache it
931 932 if not self.cacheable():
932 933 return True
933 934
934 935 newstat = filecachesubentry.stat(self.path)
935 936
936 937 # we may not know if it's cacheable yet, check again now
937 938 if newstat and self._cacheable is None:
938 939 self._cacheable = newstat.cacheable()
939 940
940 941 # check again
941 942 if not self._cacheable:
942 943 return True
943 944
944 945 if self.cachestat != newstat:
945 946 self.cachestat = newstat
946 947 return True
947 948 else:
948 949 return False
949 950
950 951 @staticmethod
951 952 def stat(path):
952 953 try:
953 954 return util.cachestat(path)
954 955 except OSError as e:
955 956 if e.errno != errno.ENOENT:
956 957 raise
957 958
958 959 class filecacheentry(object):
959 960 def __init__(self, paths, stat=True):
960 961 self._entries = []
961 962 for path in paths:
962 963 self._entries.append(filecachesubentry(path, stat))
963 964
964 965 def changed(self):
965 966 '''true if any entry has changed'''
966 967 for entry in self._entries:
967 968 if entry.changed():
968 969 return True
969 970 return False
970 971
971 972 def refresh(self):
972 973 for entry in self._entries:
973 974 entry.refresh()
974 975
975 976 class filecache(object):
976 977 '''A property like decorator that tracks files under .hg/ for updates.
977 978
978 979 Records stat info when called in _filecache.
979 980
980 981 On subsequent calls, compares old stat info with new info, and recreates the
981 982 object when any of the files changes, updating the new stat info in
982 983 _filecache.
983 984
984 985 Mercurial either atomically renames or appends to files under .hg,
985 986 so to ensure the cache is reliable we need the filesystem to be able
986 987 to tell us if a file has been replaced. If it can't, we fall back to
987 988 recreating the object on every call (essentially the same behavior as
988 989 propertycache).
989 990
990 991 '''
991 992 def __init__(self, *paths):
992 993 self.paths = paths
993 994
994 995 def join(self, obj, fname):
995 996 """Used to compute the runtime path of a cached file.
996 997
997 998 Users should subclass filecache and provide their own version of this
998 999 function to call the appropriate join function on 'obj' (an instance
999 1000 of the class that its member function was decorated).
1000 1001 """
1001 1002 raise NotImplementedError
1002 1003
1003 1004 def __call__(self, func):
1004 1005 self.func = func
1005 1006 self.name = func.__name__.encode('ascii')
1006 1007 return self
1007 1008
1008 1009 def __get__(self, obj, type=None):
1009 1010 # if accessed on the class, return the descriptor itself.
1010 1011 if obj is None:
1011 1012 return self
1012 1013 # do we need to check if the file changed?
1013 1014 if self.name in obj.__dict__:
1014 1015 assert self.name in obj._filecache, self.name
1015 1016 return obj.__dict__[self.name]
1016 1017
1017 1018 entry = obj._filecache.get(self.name)
1018 1019
1019 1020 if entry:
1020 1021 if entry.changed():
1021 1022 entry.obj = self.func(obj)
1022 1023 else:
1023 1024 paths = [self.join(obj, path) for path in self.paths]
1024 1025
1025 1026 # We stat -before- creating the object so our cache doesn't lie if
1026 1027 # a writer modified the file between the time we read and stat it
1027 1028 entry = filecacheentry(paths, True)
1028 1029 entry.obj = self.func(obj)
1029 1030
1030 1031 obj._filecache[self.name] = entry
1031 1032
1032 1033 obj.__dict__[self.name] = entry.obj
1033 1034 return entry.obj
1034 1035
1035 1036 def __set__(self, obj, value):
1036 1037 if self.name not in obj._filecache:
1037 1038 # we add an entry for the missing value because X in __dict__
1038 1039 # implies X in _filecache
1039 1040 paths = [self.join(obj, path) for path in self.paths]
1040 1041 ce = filecacheentry(paths, False)
1041 1042 obj._filecache[self.name] = ce
1042 1043 else:
1043 1044 ce = obj._filecache[self.name]
1044 1045
1045 1046 ce.obj = value # update cached copy
1046 1047 obj.__dict__[self.name] = value # update copy returned by obj.x
1047 1048
1048 1049 def __delete__(self, obj):
1049 1050 try:
1050 1051 del obj.__dict__[self.name]
1051 1052 except KeyError:
1052 1053 raise AttributeError(self.name)
1053 1054
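# --- editor's note: a minimal, hypothetical subclass sketch showing how the
# filecache descriptor above is meant to be used (localrepo defines the real
# subclasses; 'parsebookmarks' is invented):
#
# class repofilecache(filecache):
#     def join(self, obj, fname):
#         return obj.vfs.join(fname)       # resolve relative to .hg/
#
# class somerepo(object):
#     def __init__(self):
#         self._filecache = {}             # required by the descriptor
#     @repofilecache('bookmarks')
#     def _bookmarks(self):
#         return parsebookmarks(self)      # recomputed only when file changes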
1054 1055 def extdatasource(repo, source):
1055 1056 """Gather a map of rev -> value dict from the specified source
1056 1057
1057 1058 A source spec is treated as a URL, with a special case shell: type
1058 1059 for parsing the output from a shell command.
1059 1060
1060 1061 The data is parsed as a series of newline-separated records where
1061 1062 each record is a revision specifier optionally followed by a space
1062 1063 and a freeform string value. If the revision is known locally, it
1063 1064 is converted to a rev, otherwise the record is skipped.
1064 1065
1065 1066 Note that both key and value are treated as UTF-8 and converted to
1066 1067 the local encoding. This allows uniformity between local and
1067 1068 remote data sources.
1068 1069 """
1069 1070
1070 1071 spec = repo.ui.config("extdata", source)
1071 1072 if not spec:
1072 1073 raise error.Abort(_("unknown extdata source '%s'") % source)
1073 1074
1074 1075 data = {}
1075 1076 src = proc = None
1076 1077 try:
1077 1078 if spec.startswith("shell:"):
1078 1079 # external commands should be run relative to the repo root
1079 1080 cmd = spec[6:]
1080 1081 proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
1081 1082 close_fds=util.closefds,
1082 1083 stdout=subprocess.PIPE, cwd=repo.root)
1083 1084 src = proc.stdout
1084 1085 else:
1085 1086 # treat as a URL or file
1086 1087 src = url.open(repo.ui, spec)
1087 1088 for l in src:
1088 1089 if " " in l:
1089 1090 k, v = l.strip().split(" ", 1)
1090 1091 else:
1091 1092 k, v = l.strip(), ""
1092 1093
1093 1094 k = encoding.tolocal(k)
1094 1095 try:
1095 1096 data[repo[k].rev()] = encoding.tolocal(v)
1096 1097 except (error.LookupError, error.RepoLookupError):
1097 1098 pass # we ignore data for nodes that don't exist locally
1098 1099 finally:
1099 1100 if proc:
1100 1101 proc.communicate()
1101 1102 if src:
1102 1103 src.close()
1103 1104 if proc and proc.returncode != 0:
1104 1105 raise error.Abort(_("extdata command '%s' failed: %s")
1105 1106 % (cmd, util.explainexit(proc.returncode)[0]))
1106 1107
1107 1108 return data
1108 1109
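# --- editor's note: an illustrative hgrc stanza consumed by extdatasource()
# above (the source name and command are invented):
#
#   [extdata]
#   buginfo = shell:python buginfo.py
#
# extdatasource(repo, 'buginfo') then returns {rev: value}, parsed from the
# command's newline-separated "<revspec> <freeform value>" records.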
1109 1110 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1110 1111 if lock is None:
1111 1112 raise error.LockInheritanceContractViolation(
1112 1113 'lock can only be inherited while held')
1113 1114 if environ is None:
1114 1115 environ = {}
1115 1116 with lock.inherit() as locker:
1116 1117 environ[envvar] = locker
1117 1118 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1118 1119
1119 1120 def wlocksub(repo, cmd, *args, **kwargs):
1120 1121 """run cmd as a subprocess that allows inheriting repo's wlock
1121 1122
1122 1123 This can only be called while the wlock is held. This takes all the
1123 1124 arguments that ui.system does, and returns the exit code of the
1124 1125 subprocess."""
1125 1126 return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
1126 1127 **kwargs)
1127 1128
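# --- editor's note: hypothetical usage of wlocksub() above; it must run
# while the wlock is held, and the child process sees HG_WLOCK_LOCKER in
# its environment:
#
# >>> with repo.wlock():
# ...     rc = wlocksub(repo, 'hg update')   # rc is the child's exit code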
1128 1129 def gdinitconfig(ui):
1129 1130 """helper function to know if a repo should be created as general delta
1130 1131 """
1131 1132 # experimental config: format.generaldelta
1132 1133 return (ui.configbool('format', 'generaldelta')
1133 1134 or ui.configbool('format', 'usegeneraldelta'))
1134 1135
1135 1136 def gddeltaconfig(ui):
1136 1137 """helper function to know if incoming delta should be optimised
1137 1138 """
1138 1139 # experimental config: format.generaldelta
1139 1140 return ui.configbool('format', 'generaldelta')
1140 1141
1141 1142 class simplekeyvaluefile(object):
1142 1143 """A simple file with key=value lines
1143 1144
1144 1145 Keys must be alphanumeric and start with a letter; values must not
1145 1146 contain '\n' characters"""
1146 1147 firstlinekey = '__firstline'
1147 1148
1148 1149 def __init__(self, vfs, path, keys=None):
1149 1150 self.vfs = vfs
1150 1151 self.path = path
1151 1152
1152 1153 def read(self, firstlinenonkeyval=False):
1153 1154 """Read the contents of a simple key-value file
1154 1155
1155 1156 'firstlinenonkeyval' indicates whether the first line of the file should
1156 1157 be treated as a key-value pair or returned fully under the
1157 1158 __firstline key."""
1158 1159 lines = self.vfs.readlines(self.path)
1159 1160 d = {}
1160 1161 if firstlinenonkeyval:
1161 1162 if not lines:
1162 1163 e = _("empty simplekeyvalue file")
1163 1164 raise error.CorruptedState(e)
1164 1165 # we don't want to include '\n' in the __firstline
1165 1166 d[self.firstlinekey] = lines[0][:-1]
1166 1167 del lines[0]
1167 1168
1168 1169 try:
1169 1170 # the 'if line.strip()' part prevents us from failing on empty
1170 1171 # lines which only contain '\n' and therefore are not skipped
1171 1172 # by 'if line'
1172 1173 updatedict = dict(line[:-1].split('=', 1) for line in lines
1173 1174 if line.strip())
1174 1175 if self.firstlinekey in updatedict:
1175 1176 e = _("%r can't be used as a key")
1176 1177 raise error.CorruptedState(e % self.firstlinekey)
1177 1178 d.update(updatedict)
1178 1179 except ValueError as e:
1179 1180 raise error.CorruptedState(str(e))
1180 1181 return d
1181 1182
1182 1183 def write(self, data, firstline=None):
1183 1184 """Write key=>value mapping to a file
1184 1185 data is a dict. Keys must be alphanumerical and start with a letter.
1185 1186 Values must not contain newline characters.
1186 1187
1187 1188 If 'firstline' is not None, it is written to file before
1188 1189 everything else, as it is, not in a key=value form"""
1189 1190 lines = []
1190 1191 if firstline is not None:
1191 1192 lines.append('%s\n' % firstline)
1192 1193
1193 1194 for k, v in data.items():
1194 1195 if k == self.firstlinekey:
1195 1196 e = "key name '%s' is reserved" % self.firstlinekey
1196 1197 raise error.ProgrammingError(e)
1197 1198 if not k[0:1].isalpha():
1198 1199 e = "keys must start with a letter in a key-value file"
1199 1200 raise error.ProgrammingError(e)
1200 1201 if not k.isalnum():
1201 1202 e = "invalid key name in a simple key-value file"
1202 1203 raise error.ProgrammingError(e)
1203 1204 if '\n' in v:
1204 1205 e = "invalid value in a simple key-value file"
1205 1206 raise error.ProgrammingError(e)
1206 1207 lines.append("%s=%s\n" % (k, v))
1207 1208 with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
1208 1209 fp.write(''.join(lines))
1209 1210
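# --- editor's note: the on-disk shape produced by write() above, assuming
# a call like write({'state': 'done', 'version': '1'}, firstline='v1')
# (key order follows dict iteration):
#
#   v1
#   state=done
#   version=1
#
# read(firstlinenonkeyval=True) then returns
# {'__firstline': 'v1', 'state': 'done', 'version': '1'}.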
1210 1211 _reportobsoletedsource = [
1211 1212 'debugobsolete',
1212 1213 'pull',
1213 1214 'push',
1214 1215 'serve',
1215 1216 'unbundle',
1216 1217 ]
1217 1218
1218 1219 _reportnewcssource = [
1219 1220 'pull',
1220 1221 'unbundle',
1221 1222 ]
1222 1223
1223 1224 # a list of (repo, ctx, files) functions called by various commands to allow
1224 1225 # extensions to ensure the corresponding files are available locally, before the
1225 1226 # command uses them.
1226 1227 fileprefetchhooks = util.hooks()
1227 1228
1228 1229 # A marker that tells the evolve extension to suppress its own reporting
1229 1230 _reportstroubledchangesets = True
1230 1231
1231 1232 def registersummarycallback(repo, otr, txnname=''):
1232 1233 """register a callback to issue a summary after the transaction is closed
1233 1234 """
1234 1235 def txmatch(sources):
1235 1236 return any(txnname.startswith(source) for source in sources)
1236 1237
1237 1238 categories = []
1238 1239
1239 1240 def reportsummary(func):
1240 1241 """decorator for report callbacks."""
1241 1242 # The repoview life cycle is shorter than the one of the actual
1242 1243 # underlying repository. So the filtered object can die before the
1243 1244 # weakref is used, leading to trouble. We keep a reference to the
1244 1245 # unfiltered object and restore the filtering when retrieving the
1245 1246 # repository through the weakref.
1246 1247 filtername = repo.filtername
1247 1248 reporef = weakref.ref(repo.unfiltered())
1248 1249 def wrapped(tr):
1249 1250 repo = reporef()
1250 1251 if filtername:
1251 1252 repo = repo.filtered(filtername)
1252 1253 func(repo, tr)
1253 1254 newcat = '%02i-txnreport' % len(categories)
1254 1255 otr.addpostclose(newcat, wrapped)
1255 1256 categories.append(newcat)
1256 1257 return wrapped
1257 1258
1258 1259 if txmatch(_reportobsoletedsource):
1259 1260 @reportsummary
1260 1261 def reportobsoleted(repo, tr):
1261 1262 obsoleted = obsutil.getobsoleted(repo, tr)
1262 1263 if obsoleted:
1263 1264 repo.ui.status(_('obsoleted %i changesets\n')
1264 1265 % len(obsoleted))
1265 1266
1266 1267 if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
1267 1268 repo.ui.configbool('experimental', 'evolution.report-instabilities')):
1268 1269 instabilitytypes = [
1269 1270 ('orphan', 'orphan'),
1270 1271 ('phase-divergent', 'phasedivergent'),
1271 1272 ('content-divergent', 'contentdivergent'),
1272 1273 ]
1273 1274
1274 1275 def getinstabilitycounts(repo):
1275 1276 filtered = repo.changelog.filteredrevs
1276 1277 counts = {}
1277 1278 for instability, revset in instabilitytypes:
1278 1279 counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
1279 1280 filtered)
1280 1281 return counts
1281 1282
1282 1283 oldinstabilitycounts = getinstabilitycounts(repo)
1283 1284 @reportsummary
1284 1285 def reportnewinstabilities(repo, tr):
1285 1286 newinstabilitycounts = getinstabilitycounts(repo)
1286 1287 for instability, revset in instabilitytypes:
1287 1288 delta = (newinstabilitycounts[instability] -
1288 1289 oldinstabilitycounts[instability])
1289 1290 if delta > 0:
1290 1291 repo.ui.warn(_('%i new %s changesets\n') %
1291 1292 (delta, instability))
1292 1293
1293 1294 if txmatch(_reportnewcssource):
1294 1295 @reportsummary
1295 1296 def reportnewcs(repo, tr):
1296 1297 """Report the range of new revisions pulled/unbundled."""
1297 1298 newrevs = tr.changes.get('revs', xrange(0, 0))
1298 1299 if not newrevs:
1299 1300 return
1300 1301
1301 1302 # Compute the bounds of new revisions' range, excluding obsoletes.
1302 1303 unfi = repo.unfiltered()
1303 1304 revs = unfi.revs('%ld and not obsolete()', newrevs)
1304 1305 if not revs:
1305 1306 # Got only obsoletes.
1306 1307 return
1307 1308 minrev, maxrev = repo[revs.min()], repo[revs.max()]
1308 1309
1309 1310 if minrev == maxrev:
1310 1311 revrange = minrev
1311 1312 else:
1312 1313 revrange = '%s:%s' % (minrev, maxrev)
1313 1314 repo.ui.status(_('new changesets %s\n') % revrange)
1314 1315
1315 1316 def nodesummaries(repo, nodes, maxnumnodes=4):
1316 1317 if len(nodes) <= maxnumnodes or repo.ui.verbose:
1317 1318 return ' '.join(short(h) for h in nodes)
1318 1319 first = ' '.join(short(h) for h in nodes[:maxnumnodes])
1319 1320 return _("%s and %d others") % (first, len(nodes) - maxnumnodes)
1320 1321
1321 1322 def enforcesinglehead(repo, tr, desc):
1322 1323 """check that no named branch has multiple heads"""
1323 1324 if desc in ('strip', 'repair'):
1324 1325 # skip the logic during strip
1325 1326 return
1326 1327 visible = repo.filtered('visible')
1327 1328 # possible improvement: we could restrict the check to affected branch
1328 1329 for name, heads in visible.branchmap().iteritems():
1329 1330 if len(heads) > 1:
1330 1331 msg = _('rejecting multiple heads on branch "%s"')
1331 1332 msg %= name
1332 1333 hint = _('%d heads: %s')
1333 1334 hint %= (len(heads), nodesummaries(repo, heads))
1334 1335 raise error.Abort(msg, hint=hint)
1335 1336
1336 1337 def wrapconvertsink(sink):
1337 1338 """Allow extensions to wrap the sink returned by convcmd.convertsink()
1338 1339 before it is used, whether or not the convert extension was formally loaded.
1339 1340 """
1340 1341 return sink
1341 1342
1342 1343 def unhidehashlikerevs(repo, specs, hiddentype):
1343 1344 """parse the user specs and unhide changesets whose hash or revision number
1344 1345 is passed.
1345 1346
1346 1347 hiddentype can be: 1) 'warn': warn while unhiding changesets
1347 1348 2) 'nowarn': don't warn while unhiding changesets
1348 1349
1349 1350 returns a repo object with the required changesets unhidden
1350 1351 """
1351 1352 if not repo.filtername or not repo.ui.configbool('experimental',
1352 1353 'directaccess'):
1353 1354 return repo
1354 1355
1355 1356 if repo.filtername not in ('visible', 'visible-hidden'):
1356 1357 return repo
1357 1358
1358 1359 symbols = set()
1359 1360 for spec in specs:
1360 1361 try:
1361 1362 tree = revsetlang.parse(spec)
1362 1363 except error.ParseError: # will be reported by scmutil.revrange()
1363 1364 continue
1364 1365
1365 1366 symbols.update(revsetlang.gethashlikesymbols(tree))
1366 1367
1367 1368 if not symbols:
1368 1369 return repo
1369 1370
1370 1371 revs = _getrevsfromsymbols(repo, symbols)
1371 1372
1372 1373 if not revs:
1373 1374 return repo
1374 1375
1375 1376 if hiddentype == 'warn':
1376 1377 unfi = repo.unfiltered()
1377 1378 revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
1378 1379 repo.ui.warn(_("warning: accessing hidden changesets for write "
1379 1380 "operation: %s\n") % revstr)
1380 1381
1381 1382 # we have to use a new filtername to separate branch/tags caches until we can
1382 1383 # disable these caches when revisions are dynamically pinned.
1383 1384 return repo.filtered('visible-hidden', revs)
1384 1385
1385 1386 def _getrevsfromsymbols(repo, symbols):
1386 1387 """parse the list of symbols and returns a set of revision numbers of hidden
1387 1388 changesets present in symbols"""
1388 1389 revs = set()
1389 1390 unfi = repo.unfiltered()
1390 1391 unficl = unfi.changelog
1391 1392 cl = repo.changelog
1392 1393 tiprev = len(unficl)
1393 1394 pmatch = unficl._partialmatch
1394 1395 allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
1395 1396 for s in symbols:
1396 1397 try:
1397 1398 n = int(s)
1398 1399 if n <= tiprev:
1399 1400 if not allowrevnums:
1400 1401 continue
1401 1402 else:
1402 1403 if n not in cl:
1403 1404 revs.add(n)
1404 1405 continue
1405 1406 except ValueError:
1406 1407 pass
1407 1408
1408 1409 try:
1409 1410 s = pmatch(s)
1410 1411 except error.LookupError:
1411 1412 s = None
1412 1413
1413 1414 if s is not None:
1414 1415 rev = unficl.rev(s)
1415 1416 if rev not in cl:
1416 1417 revs.add(rev)
1417 1418
1418 1419 return revs