walkrepos: don't reimplement any()...
Martin von Zweigbergk
r36356:ddd9474d default
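The only change in the diff below is in walkrepos: a hand-rolled found-flag loop becomes a call to the built-in any(). A minimal standalone sketch of that pattern (the stat list and predicate are made up for illustration):

    import os

    stats = [os.stat(p) for p in ('.', '..')]   # hypothetical seen-directory stats
    target = os.stat('.')

    # before: manual flag loop
    match = False
    for st in stats:
        if os.path.samestat(target, st):
            match = True
            break

    # after: equivalent one-liner, as in this commit
    match = any(os.path.samestat(target, st) for st in stats)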
@@ -1,1422 +1,1418
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import hashlib
13 13 import os
14 14 import re
15 15 import socket
16 16 import subprocess
17 17 import weakref
18 18
19 19 from .i18n import _
20 20 from .node import (
21 21 hex,
22 22 nullid,
23 23 short,
24 24 wdirid,
25 25 wdirrev,
26 26 )
27 27
28 28 from . import (
29 29 encoding,
30 30 error,
31 31 match as matchmod,
32 32 obsolete,
33 33 obsutil,
34 34 pathutil,
35 35 phases,
36 36 pycompat,
37 37 revsetlang,
38 38 similar,
39 39 url,
40 40 util,
41 41 vfs,
42 42 )
43 43
44 44 if pycompat.iswindows:
45 45 from . import scmwindows as scmplatform
46 46 else:
47 47 from . import scmposix as scmplatform
48 48
49 49 termsize = scmplatform.termsize
50 50
51 51 class status(tuple):
52 52 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
53 53 and 'ignored' properties are only relevant to the working copy.
54 54 '''
55 55
56 56 __slots__ = ()
57 57
58 58 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
59 59 clean):
60 60 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
61 61 ignored, clean))
62 62
63 63 @property
64 64 def modified(self):
65 65 '''files that have been modified'''
66 66 return self[0]
67 67
68 68 @property
69 69 def added(self):
70 70 '''files that have been added'''
71 71 return self[1]
72 72
73 73 @property
74 74 def removed(self):
75 75 '''files that have been removed'''
76 76 return self[2]
77 77
78 78 @property
79 79 def deleted(self):
80 80 '''files that are in the dirstate, but have been deleted from the
81 81 working copy (aka "missing")
82 82 '''
83 83 return self[3]
84 84
85 85 @property
86 86 def unknown(self):
87 87 '''files not in the dirstate that are not ignored'''
88 88 return self[4]
89 89
90 90 @property
91 91 def ignored(self):
92 92 '''files not in the dirstate that are ignored (by _dirignore())'''
93 93 return self[5]
94 94
95 95 @property
96 96 def clean(self):
97 97 '''files that have not been modified'''
98 98 return self[6]
99 99
100 100 def __repr__(self, *args, **kwargs):
101 101 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
102 102 'unknown=%r, ignored=%r, clean=%r>') % self)
103 103
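For illustration, a hedged usage sketch of the status class above (field contents are made up; assumes this module is importable as mercurial.scmutil):

    from mercurial import scmutil

    st = scmutil.status(['a.txt'], [], [], [], [], [], ['b.txt'])
    assert st.modified == ['a.txt']   # named access...
    assert st[6] == ['b.txt']         # ...or plain 7-tuple indexing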
104 104 def itersubrepos(ctx1, ctx2):
105 105 """find subrepos in ctx1 or ctx2"""
106 106 # Create a (subpath, ctx) mapping where we prefer subpaths from
107 107 # ctx1. The subpaths from ctx2 are important when the .hgsub file
108 108 # has been modified (in ctx2) but not yet committed (in ctx1).
109 109 subpaths = dict.fromkeys(ctx2.substate, ctx2)
110 110 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
111 111
112 112 missing = set()
113 113
114 114 for subpath in ctx2.substate:
115 115 if subpath not in ctx1.substate:
116 116 del subpaths[subpath]
117 117 missing.add(subpath)
118 118
119 119 for subpath, ctx in sorted(subpaths.iteritems()):
120 120 yield subpath, ctx.sub(subpath)
121 121
122 122 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
123 123 # status and diff will have an accurate result when it does
124 124 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
125 125 # against itself.
126 126 for subpath in missing:
127 127 yield subpath, ctx2.nullsub(subpath, ctx1)
128 128
129 129 def nochangesfound(ui, repo, excluded=None):
130 130 '''Report no changes for push/pull; excluded is None or a list of
131 131 nodes excluded from the push/pull.
132 132 '''
133 133 secretlist = []
134 134 if excluded:
135 135 for n in excluded:
136 136 ctx = repo[n]
137 137 if ctx.phase() >= phases.secret and not ctx.extinct():
138 138 secretlist.append(n)
139 139
140 140 if secretlist:
141 141 ui.status(_("no changes found (ignored %d secret changesets)\n")
142 142 % len(secretlist))
143 143 else:
144 144 ui.status(_("no changes found\n"))
145 145
146 146 def callcatch(ui, func):
147 147 """call func() with global exception handling
148 148
149 149 return func() if no exception happens. otherwise do some error handling
150 150 and return an exit code accordingly. does not handle all exceptions.
151 151 """
152 152 try:
153 153 try:
154 154 return func()
155 155 except: # re-raises
156 156 ui.traceback()
157 157 raise
158 158 # Global exception handling, alphabetically
159 159 # Mercurial-specific first, followed by built-in and library exceptions
160 160 except error.LockHeld as inst:
161 161 if inst.errno == errno.ETIMEDOUT:
162 162 reason = _('timed out waiting for lock held by %r') % inst.locker
163 163 else:
164 164 reason = _('lock held by %r') % inst.locker
165 165 ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason))
166 166 if not inst.locker:
167 167 ui.warn(_("(lock might be very busy)\n"))
168 168 except error.LockUnavailable as inst:
169 169 ui.warn(_("abort: could not lock %s: %s\n") %
170 170 (inst.desc or inst.filename,
171 171 encoding.strtolocal(inst.strerror)))
172 172 except error.OutOfBandError as inst:
173 173 if inst.args:
174 174 msg = _("abort: remote error:\n")
175 175 else:
176 176 msg = _("abort: remote error\n")
177 177 ui.warn(msg)
178 178 if inst.args:
179 179 ui.warn(''.join(inst.args))
180 180 if inst.hint:
181 181 ui.warn('(%s)\n' % inst.hint)
182 182 except error.RepoError as inst:
183 183 ui.warn(_("abort: %s!\n") % inst)
184 184 if inst.hint:
185 185 ui.warn(_("(%s)\n") % inst.hint)
186 186 except error.ResponseError as inst:
187 187 ui.warn(_("abort: %s") % inst.args[0])
188 188 if not isinstance(inst.args[1], basestring):
189 189 ui.warn(" %r\n" % (inst.args[1],))
190 190 elif not inst.args[1]:
191 191 ui.warn(_(" empty string\n"))
192 192 else:
193 193 ui.warn("\n%r\n" % util.ellipsis(inst.args[1]))
194 194 except error.CensoredNodeError as inst:
195 195 ui.warn(_("abort: file censored %s!\n") % inst)
196 196 except error.RevlogError as inst:
197 197 ui.warn(_("abort: %s!\n") % inst)
198 198 except error.InterventionRequired as inst:
199 199 ui.warn("%s\n" % inst)
200 200 if inst.hint:
201 201 ui.warn(_("(%s)\n") % inst.hint)
202 202 return 1
203 203 except error.WdirUnsupported:
204 204 ui.warn(_("abort: working directory revision cannot be specified\n"))
205 205 except error.Abort as inst:
206 206 ui.warn(_("abort: %s\n") % inst)
207 207 if inst.hint:
208 208 ui.warn(_("(%s)\n") % inst.hint)
209 209 except ImportError as inst:
210 210 ui.warn(_("abort: %s!\n") % inst)
211 211 m = str(inst).split()[-1]
212 212 if m in "mpatch bdiff".split():
213 213 ui.warn(_("(did you forget to compile extensions?)\n"))
214 214 elif m in "zlib".split():
215 215 ui.warn(_("(is your Python install correct?)\n"))
216 216 except IOError as inst:
217 217 if util.safehasattr(inst, "code"):
218 218 ui.warn(_("abort: %s\n") % util.forcebytestr(inst))
219 219 elif util.safehasattr(inst, "reason"):
220 220 try: # usually it is in the form (errno, strerror)
221 221 reason = inst.reason.args[1]
222 222 except (AttributeError, IndexError):
223 223 # it might be anything, for example a string
224 224 reason = inst.reason
225 225 if isinstance(reason, unicode):
226 226 # SSLError of Python 2.7.9 contains a unicode
227 227 reason = encoding.unitolocal(reason)
228 228 ui.warn(_("abort: error: %s\n") % reason)
229 229 elif (util.safehasattr(inst, "args")
230 230 and inst.args and inst.args[0] == errno.EPIPE):
231 231 pass
232 232 elif getattr(inst, "strerror", None):
233 233 if getattr(inst, "filename", None):
234 234 ui.warn(_("abort: %s: %s\n") % (
235 235 encoding.strtolocal(inst.strerror), inst.filename))
236 236 else:
237 237 ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
238 238 else:
239 239 raise
240 240 except OSError as inst:
241 241 if getattr(inst, "filename", None) is not None:
242 242 ui.warn(_("abort: %s: '%s'\n") % (
243 243 encoding.strtolocal(inst.strerror), inst.filename))
244 244 else:
245 245 ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
246 246 except MemoryError:
247 247 ui.warn(_("abort: out of memory\n"))
248 248 except SystemExit as inst:
249 249 # Commands shouldn't sys.exit directly, but give a return code.
250 250 # Just in case, catch this and pass the exit code to the caller.
251 251 return inst.code
252 252 except socket.error as inst:
253 253 ui.warn(_("abort: %s\n") % inst.args[-1])
254 254
255 255 return -1
256 256
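A hedged sketch of how a caller might wrap an operation in callcatch (the failing function is made up):

    from mercurial import error, scmutil, ui as uimod

    def operation():
        raise error.Abort('demo failure')

    u = uimod.ui.load()
    code = scmutil.callcatch(u, operation)  # prints 'abort: demo failure', returns -1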
257 257 def checknewlabel(repo, lbl, kind):
258 258 # Do not use the "kind" parameter in ui output.
259 259 # It makes strings difficult to translate.
260 260 if lbl in ['tip', '.', 'null']:
261 261 raise error.Abort(_("the name '%s' is reserved") % lbl)
262 262 for c in (':', '\0', '\n', '\r'):
263 263 if c in lbl:
264 264 raise error.Abort(_("%r cannot be used in a name") % c)
265 265 try:
266 266 int(lbl)
267 267 raise error.Abort(_("cannot use an integer as a name"))
268 268 except ValueError:
269 269 pass
270 270 if lbl.strip() != lbl:
271 271 raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
272 272
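For illustration, inputs the check above rejects (hedged; the repo argument is unused by these particular checks, so None suffices here):

    from mercurial import error, scmutil

    for bad in ['tip', '12', 'a:b', ' padded ']:
        try:
            scmutil.checknewlabel(None, bad, 'bookmark')
        except error.Abort as e:
            print('rejected %r: %s' % (bad, e))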
273 273 def checkfilename(f):
274 274 '''Check that the filename f is an acceptable filename for a tracked file'''
275 275 if '\r' in f or '\n' in f:
276 276 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
277 277
278 278 def checkportable(ui, f):
279 279 '''Check if filename f is portable and warn or abort depending on config'''
280 280 checkfilename(f)
281 281 abort, warn = checkportabilityalert(ui)
282 282 if abort or warn:
283 283 msg = util.checkwinfilename(f)
284 284 if msg:
285 285 msg = "%s: %s" % (msg, util.shellquote(f))
286 286 if abort:
287 287 raise error.Abort(msg)
288 288 ui.warn(_("warning: %s\n") % msg)
289 289
290 290 def checkportabilityalert(ui):
291 291 '''check if the user's config requests nothing, a warning, or abort for
292 292 non-portable filenames'''
293 293 val = ui.config('ui', 'portablefilenames')
294 294 lval = val.lower()
295 295 bval = util.parsebool(val)
296 296 abort = pycompat.iswindows or lval == 'abort'
297 297 warn = bval or lval == 'warn'
298 298 if bval is None and not (warn or abort or lval == 'ignore'):
299 299 raise error.ConfigError(
300 300 _("ui.portablefilenames value is invalid ('%s')") % val)
301 301 return abort, warn
302 302
303 303 class casecollisionauditor(object):
304 304 def __init__(self, ui, abort, dirstate):
305 305 self._ui = ui
306 306 self._abort = abort
307 307 allfiles = '\0'.join(dirstate._map)
308 308 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
309 309 self._dirstate = dirstate
310 310 # The purpose of _newfiles is so that we don't complain about
311 311 # case collisions if someone were to call this object with the
312 312 # same filename twice.
313 313 self._newfiles = set()
314 314
315 315 def __call__(self, f):
316 316 if f in self._newfiles:
317 317 return
318 318 fl = encoding.lower(f)
319 319 if fl in self._loweredfiles and f not in self._dirstate:
320 320 msg = _('possible case-folding collision for %s') % f
321 321 if self._abort:
322 322 raise error.Abort(msg)
323 323 self._ui.warn(_("warning: %s\n") % msg)
324 324 self._loweredfiles.add(fl)
325 325 self._newfiles.add(f)
326 326
327 327 def filteredhash(repo, maxrev):
328 328 """build hash of filtered revisions in the current repoview.
329 329
330 330 Multiple caches perform up-to-date validation by checking that the
331 331 tiprev and tipnode stored in the cache file match the current repository.
332 332 However, this is not sufficient for validating repoviews because the set
333 333 of revisions in the view may change without the repository tiprev and
334 334 tipnode changing.
335 335
336 336 This function hashes all the revs filtered from the view and returns
337 337 that SHA-1 digest.
338 338 """
339 339 cl = repo.changelog
340 340 if not cl.filteredrevs:
341 341 return None
342 342 key = None
343 343 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
344 344 if revs:
345 345 s = hashlib.sha1()
346 346 for rev in revs:
347 347 s.update('%d;' % rev)
348 348 key = s.digest()
349 349 return key
350 350
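The cache key above is just SHA-1 over 'rev;' strings; a hedged standalone recomputation with made-up revision numbers:

    import hashlib

    revs = [2, 5, 9]  # hypothetical filtered revisions <= maxrev
    s = hashlib.sha1()
    for rev in revs:
        s.update(('%d;' % rev).encode('ascii'))
    key = s.digest()  # 20-byte digest compared against the cached one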
351 351 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
351 351 '''yield every hg repository under path, always recursing into subdirs.
352 352 The recurse flag only controls recursion into found repos' working dirs'''
354 354 def errhandler(err):
355 355 if err.filename == path:
356 356 raise err
357 357 samestat = getattr(os.path, 'samestat', None)
358 358 if followsym and samestat is not None:
359 359 def adddir(dirlst, dirname):
360 match = False
361 360 dirstat = os.stat(dirname)
362 for lstdirstat in dirlst:
363 if samestat(dirstat, lstdirstat):
364 match = True
365 break
361 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
366 362 if not match:
367 363 dirlst.append(dirstat)
368 364 return not match
369 365 else:
370 366 followsym = False
371 367
372 368 if (seen_dirs is None) and followsym:
373 369 seen_dirs = []
374 370 adddir(seen_dirs, path)
375 371 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
376 372 dirs.sort()
377 373 if '.hg' in dirs:
378 374 yield root # found a repository
379 375 qroot = os.path.join(root, '.hg', 'patches')
380 376 if os.path.isdir(os.path.join(qroot, '.hg')):
381 377 yield qroot # we have a patch queue repo here
382 378 if recurse:
383 379 # avoid recursing inside the .hg directory
384 380 dirs.remove('.hg')
385 381 else:
386 382 dirs[:] = [] # don't descend further
387 383 elif followsym:
388 384 newdirs = []
389 385 for d in dirs:
390 386 fname = os.path.join(root, d)
391 387 if adddir(seen_dirs, fname):
392 388 if os.path.islink(fname):
393 389 for hgname in walkrepos(fname, True, seen_dirs):
394 390 yield hgname
395 391 else:
396 392 newdirs.append(d)
397 393 dirs[:] = newdirs
398 394
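A hedged usage sketch for walkrepos (the scan root is a placeholder):

    from mercurial import scmutil

    # yields each directory containing a .hg, plus any .hg/patches queue repos
    for repodir in scmutil.walkrepos('/srv/repos', followsym=True):
        print(repodir)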
399 395 def binnode(ctx):
400 396 """Return binary node id for a given basectx"""
401 397 node = ctx.node()
402 398 if node is None:
403 399 return wdirid
404 400 return node
405 401
406 402 def intrev(ctx):
407 403 """Return integer for a given basectx that can be used in comparison or
408 404 arithmetic operation"""
409 405 rev = ctx.rev()
410 406 if rev is None:
411 407 return wdirrev
412 408 return rev
413 409
414 410 def formatchangeid(ctx):
415 411 """Format changectx as '{rev}:{node|formatnode}', which is the default
416 412 template provided by logcmdutil.changesettemplater"""
417 413 repo = ctx.repo()
418 414 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
419 415
420 416 def formatrevnode(ui, rev, node):
421 417 """Format given revision and node depending on the current verbosity"""
422 418 if ui.debugflag:
423 419 hexfunc = hex
424 420 else:
425 421 hexfunc = short
426 422 return '%d:%s' % (rev, hexfunc(node))
427 423
428 424 def revsingle(repo, revspec, default='.', localalias=None):
429 425 if not revspec and revspec != 0:
430 426 return repo[default]
431 427
432 428 l = revrange(repo, [revspec], localalias=localalias)
433 429 if not l:
434 430 raise error.Abort(_('empty revision set'))
435 431 return repo[l.last()]
436 432
437 433 def _pairspec(revspec):
438 434 tree = revsetlang.parse(revspec)
439 435 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
440 436
441 437 def revpair(repo, revs):
442 438 if not revs:
443 439 return repo.dirstate.p1(), None
444 440
445 441 l = revrange(repo, revs)
446 442
447 443 if not l:
448 444 first = second = None
449 445 elif l.isascending():
450 446 first = l.min()
451 447 second = l.max()
452 448 elif l.isdescending():
453 449 first = l.max()
454 450 second = l.min()
455 451 else:
456 452 first = l.first()
457 453 second = l.last()
458 454
459 455 if first is None:
460 456 raise error.Abort(_('empty revision range'))
461 457 if (first == second and len(revs) >= 2
462 458 and not all(revrange(repo, [r]) for r in revs)):
463 459 raise error.Abort(_('empty revision on one side of range'))
464 460
465 461 # if top-level is range expression, the result must always be a pair
466 462 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
467 463 return repo.lookup(first), None
468 464
469 465 return repo.lookup(first), repo.lookup(second)
470 466
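Hedged usage sketches for the resolvers above (the repository path is a placeholder):

    from mercurial import hg, scmutil, ui as uimod

    repo = hg.repository(uimod.ui.load(), '/path/to/repo')
    ctx = scmutil.revsingle(repo, 'tip')     # a single changectx; aborts on empty set
    n1, n2 = scmutil.revpair(repo, ['0:2'])  # binary nodes of the range endpoints;
                                             # n2 is None when only one rev is named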
471 467 def revrange(repo, specs, localalias=None):
472 468 """Execute 1 to many revsets and return the union.
473 469
474 470 This is the preferred mechanism for executing revsets using user-specified
475 471 config options, such as revset aliases.
476 472
477 473 The revsets specified by ``specs`` will be executed via a chained ``OR``
478 474 expression. If ``specs`` is empty, an empty result is returned.
479 475
480 476 ``specs`` can contain integers, in which case they are assumed to be
481 477 revision numbers.
482 478
483 479 It is assumed the revsets are already formatted. If you have arguments
484 480 that need to be expanded in the revset, call ``revsetlang.formatspec()``
485 481 and pass the result as an element of ``specs``.
486 482
487 483 Specifying a single revset is allowed.
488 484
489 485 Returns a ``revset.abstractsmartset`` which is a list-like interface over
490 486 integer revisions.
491 487 """
492 488 allspecs = []
493 489 for spec in specs:
494 490 if isinstance(spec, int):
495 491 spec = revsetlang.formatspec('rev(%d)', spec)
496 492 allspecs.append(spec)
497 493 return repo.anyrevs(allspecs, user=True, localalias=localalias)
498 494
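Per the docstring, caller-supplied values should be expanded with revsetlang.formatspec before being passed in; a hedged sketch (the repository path is a placeholder):

    from mercurial import hg, revsetlang, scmutil, ui as uimod

    repo = hg.repository(uimod.ui.load(), '/path/to/repo')
    spec = revsetlang.formatspec('branch(%s) and not obsolete()', 'default')
    for rev in scmutil.revrange(repo, [spec]):  # a smartset of integer revs
        print(rev)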
499 495 def meaningfulparents(repo, ctx):
500 496 """Return list of meaningful (or all if debug) parentrevs for rev.
501 497
502 498 For merges (two non-nullrev revisions) both parents are meaningful.
503 499 Otherwise the first parent revision is considered meaningful if it
504 500 is not the preceding revision.
505 501 """
506 502 parents = ctx.parents()
507 503 if len(parents) > 1:
508 504 return parents
509 505 if repo.ui.debugflag:
510 506 return [parents[0], repo['null']]
511 507 if parents[0].rev() >= intrev(ctx) - 1:
512 508 return []
513 509 return parents
514 510
515 511 def expandpats(pats):
516 512 '''Expand bare globs when running on windows.
517 513 On posix we assume it already has already been done by sh.'''
518 514 if not util.expandglobs:
519 515 return list(pats)
520 516 ret = []
521 517 for kindpat in pats:
522 518 kind, pat = matchmod._patsplit(kindpat, None)
523 519 if kind is None:
524 520 try:
525 521 globbed = glob.glob(pat)
526 522 except re.error:
527 523 globbed = [pat]
528 524 if globbed:
529 525 ret.extend(globbed)
530 526 continue
531 527 ret.append(kindpat)
532 528 return ret
533 529
534 530 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
535 531 badfn=None):
536 532 '''Return a matcher and the patterns that were used.
537 533 The matcher will warn about bad matches, unless an alternate badfn callback
538 534 is provided.'''
539 535 if pats == ("",):
540 536 pats = []
541 537 if opts is None:
542 538 opts = {}
543 539 if not globbed and default == 'relpath':
544 540 pats = expandpats(pats or [])
545 541
546 542 def bad(f, msg):
547 543 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
548 544
549 545 if badfn is None:
550 546 badfn = bad
551 547
552 548 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
553 549 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
554 550
555 551 if m.always():
556 552 pats = []
557 553 return m, pats
558 554
559 555 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
560 556 badfn=None):
561 557 '''Return a matcher that will warn about bad matches.'''
562 558 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
563 559
564 560 def matchall(repo):
565 561 '''Return a matcher that will efficiently match everything.'''
566 562 return matchmod.always(repo.root, repo.getcwd())
567 563
568 564 def matchfiles(repo, files, badfn=None):
569 565 '''Return a matcher that will efficiently match exactly these files.'''
570 566 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
571 567
572 568 def parsefollowlinespattern(repo, rev, pat, msg):
573 569 """Return a file name from `pat` pattern suitable for usage in followlines
574 570 logic.
575 571 """
576 572 if not matchmod.patkind(pat):
577 573 return pathutil.canonpath(repo.root, repo.getcwd(), pat)
578 574 else:
579 575 ctx = repo[rev]
580 576 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
581 577 files = [f for f in ctx if m(f)]
582 578 if len(files) != 1:
583 579 raise error.ParseError(msg)
584 580 return files[0]
585 581
586 582 def origpath(ui, repo, filepath):
587 583 '''customize where .orig files are created
588 584
589 585 Fetch user defined path from config file: [ui] origbackuppath = <path>
590 586 Fall back to default (filepath with .orig suffix) if not specified
591 587 '''
592 588 origbackuppath = ui.config('ui', 'origbackuppath')
593 589 if not origbackuppath:
594 590 return filepath + ".orig"
595 591
596 592 # Convert filepath from an absolute path into a path inside the repo.
597 593 filepathfromroot = util.normpath(os.path.relpath(filepath,
598 594 start=repo.root))
599 595
600 596 origvfs = vfs.vfs(repo.wjoin(origbackuppath))
601 597 origbackupdir = origvfs.dirname(filepathfromroot)
602 598 if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
603 599 ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))
604 600
605 601 # Remove any files that conflict with the backup file's path
606 602 for f in reversed(list(util.finddirs(filepathfromroot))):
607 603 if origvfs.isfileorlink(f):
608 604 ui.note(_('removing conflicting file: %s\n')
609 605 % origvfs.join(f))
610 606 origvfs.unlink(f)
611 607 break
612 608
613 609 origvfs.makedirs(origbackupdir)
614 610
615 611 if origvfs.isdir(filepathfromroot) and not origvfs.islink(filepathfromroot):
616 612 ui.note(_('removing conflicting directory: %s\n')
617 613 % origvfs.join(filepathfromroot))
618 614 origvfs.rmtree(filepathfromroot, forcibly=True)
619 615
620 616 return origvfs.join(filepathfromroot)
621 617
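For illustration, the configuration origpath honors (hgrc snippet; the backup location is an example):

    [ui]
    origbackuppath = .hg/origbackups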
622 618 class _containsnode(object):
623 619 """proxy __contains__(node) to container.__contains__ which accepts revs"""
624 620
625 621 def __init__(self, repo, revcontainer):
626 622 self._torev = repo.changelog.rev
627 623 self._revcontains = revcontainer.__contains__
628 624
629 625 def __contains__(self, node):
630 626 return self._revcontains(self._torev(node))
631 627
632 628 def cleanupnodes(repo, replacements, operation, moves=None, metadata=None):
633 629 """do common cleanups when old nodes are replaced by new nodes
634 630
635 631 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
636 632 (we might also want to move working directory parent in the future)
637 633
638 634 By default, bookmark moves are calculated automatically from 'replacements',
639 635 but 'moves' can be used to override that. Also, 'moves' may include
640 636 additional bookmark moves that should not have associated obsmarkers.
641 637
642 638 replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
643 639 have replacements. operation is a string, like "rebase".
644 640
645 641 metadata is a dictionary containing metadata to be stored in the obsmarker if
646 642 obsolescence is enabled.
647 643 """
648 644 if not replacements and not moves:
649 645 return
650 646
651 647 # translate mapping's other forms
652 648 if not util.safehasattr(replacements, 'items'):
653 649 replacements = {n: () for n in replacements}
654 650
655 651 # Calculate bookmark movements
656 652 if moves is None:
657 653 moves = {}
658 654 # Unfiltered repo is needed since nodes in replacements might be hidden.
659 655 unfi = repo.unfiltered()
660 656 for oldnode, newnodes in replacements.items():
661 657 if oldnode in moves:
662 658 continue
663 659 if len(newnodes) > 1:
664 660 # usually a split, take the one with biggest rev number
665 661 newnode = next(unfi.set('max(%ln)', newnodes)).node()
666 662 elif len(newnodes) == 0:
667 663 # move bookmark backwards
668 664 roots = list(unfi.set('max((::%n) - %ln)', oldnode,
669 665 list(replacements)))
670 666 if roots:
671 667 newnode = roots[0].node()
672 668 else:
673 669 newnode = nullid
674 670 else:
675 671 newnode = newnodes[0]
676 672 moves[oldnode] = newnode
677 673
678 674 with repo.transaction('cleanup') as tr:
679 675 # Move bookmarks
680 676 bmarks = repo._bookmarks
681 677 bmarkchanges = []
682 678 allnewnodes = [n for ns in replacements.values() for n in ns]
683 679 for oldnode, newnode in moves.items():
684 680 oldbmarks = repo.nodebookmarks(oldnode)
685 681 if not oldbmarks:
686 682 continue
687 683 from . import bookmarks # avoid import cycle
688 684 repo.ui.debug('moving bookmarks %r from %s to %s\n' %
689 685 (oldbmarks, hex(oldnode), hex(newnode)))
690 686 # Delete divergent bookmarks being parents of related newnodes
691 687 deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
692 688 allnewnodes, newnode, oldnode)
693 689 deletenodes = _containsnode(repo, deleterevs)
694 690 for name in oldbmarks:
695 691 bmarkchanges.append((name, newnode))
696 692 for b in bookmarks.divergent2delete(repo, deletenodes, name):
697 693 bmarkchanges.append((b, None))
698 694
699 695 if bmarkchanges:
700 696 bmarks.applychanges(repo, tr, bmarkchanges)
701 697
702 698 # Obsolete or strip nodes
703 699 if obsolete.isenabled(repo, obsolete.createmarkersopt):
704 700 # If a node is already obsoleted, and we want to obsolete it
705 701 # without a successor, skip that obsolete request since it's
706 702 # unnecessary. That's the "if s or not isobs(n)" check below.
707 703 # Also sort the nodes in topological order; that might be useful for
708 704 # some obsstore logic.
709 705 # NOTE: the filtering and sorting might belong to createmarkers.
710 706 isobs = unfi.obsstore.successors.__contains__
711 707 torev = unfi.changelog.rev
712 708 sortfunc = lambda ns: torev(ns[0])
713 709 rels = [(unfi[n], tuple(unfi[m] for m in s))
714 710 for n, s in sorted(replacements.items(), key=sortfunc)
715 711 if s or not isobs(n)]
716 712 if rels:
717 713 obsolete.createmarkers(repo, rels, operation=operation,
718 714 metadata=metadata)
719 715 else:
720 716 from . import repair # avoid import cycle
721 717 tostrip = list(replacements)
722 718 if tostrip:
723 719 repair.delayedstrip(repo.ui, repo, tostrip, operation)
724 720
725 721 def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
726 722 if opts is None:
727 723 opts = {}
728 724 m = matcher
729 725 if dry_run is None:
730 726 dry_run = opts.get('dry_run')
731 727 if similarity is None:
732 728 similarity = float(opts.get('similarity') or 0)
733 729
734 730 ret = 0
735 731 join = lambda f: os.path.join(prefix, f)
736 732
737 733 wctx = repo[None]
738 734 for subpath in sorted(wctx.substate):
739 735 submatch = matchmod.subdirmatcher(subpath, m)
740 736 if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
741 737 sub = wctx.sub(subpath)
742 738 try:
743 739 if sub.addremove(submatch, prefix, opts, dry_run, similarity):
744 740 ret = 1
745 741 except error.LookupError:
746 742 repo.ui.status(_("skipping missing subrepository: %s\n")
747 743 % join(subpath))
748 744
749 745 rejected = []
750 746 def badfn(f, msg):
751 747 if f in m.files():
752 748 m.bad(f, msg)
753 749 rejected.append(f)
754 750
755 751 badmatch = matchmod.badmatch(m, badfn)
756 752 added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
757 753 badmatch)
758 754
759 755 unknownset = set(unknown + forgotten)
760 756 toprint = unknownset.copy()
761 757 toprint.update(deleted)
762 758 for abs in sorted(toprint):
763 759 if repo.ui.verbose or not m.exact(abs):
764 760 if abs in unknownset:
765 761 status = _('adding %s\n') % m.uipath(abs)
766 762 else:
767 763 status = _('removing %s\n') % m.uipath(abs)
768 764 repo.ui.status(status)
769 765
770 766 renames = _findrenames(repo, m, added + unknown, removed + deleted,
771 767 similarity)
772 768
773 769 if not dry_run:
774 770 _markchanges(repo, unknown + forgotten, deleted, renames)
775 771
776 772 for f in rejected:
777 773 if f in m.files():
778 774 return 1
779 775 return ret
780 776
781 777 def marktouched(repo, files, similarity=0.0):
782 778 '''Assert that files have somehow been operated upon. files are relative to
783 779 the repo root.'''
784 780 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
785 781 rejected = []
786 782
787 783 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
788 784
789 785 if repo.ui.verbose:
790 786 unknownset = set(unknown + forgotten)
791 787 toprint = unknownset.copy()
792 788 toprint.update(deleted)
793 789 for abs in sorted(toprint):
794 790 if abs in unknownset:
795 791 status = _('adding %s\n') % abs
796 792 else:
797 793 status = _('removing %s\n') % abs
798 794 repo.ui.status(status)
799 795
800 796 renames = _findrenames(repo, m, added + unknown, removed + deleted,
801 797 similarity)
802 798
803 799 _markchanges(repo, unknown + forgotten, deleted, renames)
804 800
805 801 for f in rejected:
806 802 if f in m.files():
807 803 return 1
808 804 return 0
809 805
810 806 def _interestingfiles(repo, matcher):
811 807 '''Walk dirstate with matcher, looking for files that addremove would care
812 808 about.
813 809
814 810 This is different from dirstate.status because it doesn't care about
815 811 whether files are modified or clean.'''
816 812 added, unknown, deleted, removed, forgotten = [], [], [], [], []
817 813 audit_path = pathutil.pathauditor(repo.root, cached=True)
818 814
819 815 ctx = repo[None]
820 816 dirstate = repo.dirstate
821 817 walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
822 818 unknown=True, ignored=False, full=False)
823 819 for abs, st in walkresults.iteritems():
824 820 dstate = dirstate[abs]
825 821 if dstate == '?' and audit_path.check(abs):
826 822 unknown.append(abs)
827 823 elif dstate != 'r' and not st:
828 824 deleted.append(abs)
829 825 elif dstate == 'r' and st:
830 826 forgotten.append(abs)
831 827 # for finding renames
832 828 elif dstate == 'r' and not st:
833 829 removed.append(abs)
834 830 elif dstate == 'a':
835 831 added.append(abs)
836 832
837 833 return added, unknown, deleted, removed, forgotten
838 834
839 835 def _findrenames(repo, matcher, added, removed, similarity):
840 836 '''Find renames from removed files to added ones.'''
841 837 renames = {}
842 838 if similarity > 0:
843 839 for old, new, score in similar.findrenames(repo, added, removed,
844 840 similarity):
845 841 if (repo.ui.verbose or not matcher.exact(old)
846 842 or not matcher.exact(new)):
847 843 repo.ui.status(_('recording removal of %s as rename to %s '
848 844 '(%d%% similar)\n') %
849 845 (matcher.rel(old), matcher.rel(new),
850 846 score * 100))
851 847 renames[new] = old
852 848 return renames
853 849
854 850 def _markchanges(repo, unknown, deleted, renames):
855 851 '''Marks the files in unknown as added, the files in deleted as removed,
856 852 and the files in renames as copied.'''
857 853 wctx = repo[None]
858 854 with repo.wlock():
859 855 wctx.forget(deleted)
860 856 wctx.add(unknown)
861 857 for new, old in renames.iteritems():
862 858 wctx.copy(old, new)
863 859
864 860 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
865 861 """Update the dirstate to reflect the intent of copying src to dst. For
866 862 different reasons it might not end with dst being marked as copied from src.
867 863 """
868 864 origsrc = repo.dirstate.copied(src) or src
869 865 if dst == origsrc: # copying back a copy?
870 866 if repo.dirstate[dst] not in 'mn' and not dryrun:
871 867 repo.dirstate.normallookup(dst)
872 868 else:
873 869 if repo.dirstate[origsrc] == 'a' and origsrc == src:
874 870 if not ui.quiet:
875 871 ui.warn(_("%s has not been committed yet, so no copy "
876 872 "data will be stored for %s.\n")
877 873 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
878 874 if repo.dirstate[dst] in '?r' and not dryrun:
879 875 wctx.add([dst])
880 876 elif not dryrun:
881 877 wctx.copy(origsrc, dst)
882 878
883 879 def readrequires(opener, supported):
884 880 '''Reads and parses .hg/requires and checks if all entries found
885 881 are in the list of supported features.'''
886 882 requirements = set(opener.read("requires").splitlines())
887 883 missings = []
888 884 for r in requirements:
889 885 if r not in supported:
890 886 if not r or not r[0:1].isalnum():
891 887 raise error.RequirementError(_(".hg/requires file is corrupt"))
892 888 missings.append(r)
893 889 missings.sort()
894 890 if missings:
895 891 raise error.RequirementError(
896 892 _("repository requires features unknown to this Mercurial: %s")
897 893 % " ".join(missings),
898 894 hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
899 895 " for more information"))
900 896 return requirements
901 897
902 898 def writerequires(opener, requirements):
903 899 with opener('requires', 'w') as fp:
904 900 for r in sorted(requirements):
905 901 fp.write("%s\n" % r)
906 902
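For illustration, a typical .hg/requires payload that readrequires parses, one feature name per line:

    revlogv1
    generaldelta
    store
    fncache
    dotencode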
907 903 class filecachesubentry(object):
908 904 def __init__(self, path, stat):
909 905 self.path = path
910 906 self.cachestat = None
911 907 self._cacheable = None
912 908
913 909 if stat:
914 910 self.cachestat = filecachesubentry.stat(self.path)
915 911
916 912 if self.cachestat:
917 913 self._cacheable = self.cachestat.cacheable()
918 914 else:
919 915 # None means we don't know yet
920 916 self._cacheable = None
921 917
922 918 def refresh(self):
923 919 if self.cacheable():
924 920 self.cachestat = filecachesubentry.stat(self.path)
925 921
926 922 def cacheable(self):
927 923 if self._cacheable is not None:
928 924 return self._cacheable
929 925
930 926 # we don't know yet, assume it is for now
931 927 return True
932 928
933 929 def changed(self):
934 930 # no point in going further if we can't cache it
935 931 if not self.cacheable():
936 932 return True
937 933
938 934 newstat = filecachesubentry.stat(self.path)
939 935
940 936 # we may not know if it's cacheable yet, check again now
941 937 if newstat and self._cacheable is None:
942 938 self._cacheable = newstat.cacheable()
943 939
944 940 # check again
945 941 if not self._cacheable:
946 942 return True
947 943
948 944 if self.cachestat != newstat:
949 945 self.cachestat = newstat
950 946 return True
951 947 else:
952 948 return False
953 949
954 950 @staticmethod
955 951 def stat(path):
956 952 try:
957 953 return util.cachestat(path)
958 954 except OSError as e:
959 955 if e.errno != errno.ENOENT:
960 956 raise
961 957
962 958 class filecacheentry(object):
963 959 def __init__(self, paths, stat=True):
964 960 self._entries = []
965 961 for path in paths:
966 962 self._entries.append(filecachesubentry(path, stat))
967 963
968 964 def changed(self):
969 965 '''true if any entry has changed'''
970 966 for entry in self._entries:
971 967 if entry.changed():
972 968 return True
973 969 return False
974 970
975 971 def refresh(self):
976 972 for entry in self._entries:
977 973 entry.refresh()
978 974
979 975 class filecache(object):
980 976 '''A property like decorator that tracks files under .hg/ for updates.
981 977
982 978 Records stat info when called in _filecache.
983 979
984 980 On subsequent calls, compares old stat info with new info, and recreates the
985 981 object when any of the files changes, updating the new stat info in
986 982 _filecache.
987 983
988 984 Mercurial either atomically renames or appends to files under .hg,
989 985 so to ensure the cache is reliable we need the filesystem to be able
990 986 to tell us if a file has been replaced. If it can't, we fall back to
991 987 recreating the object on every call (essentially the same behavior as
992 988 propertycache).
993 989
994 990 '''
995 991 def __init__(self, *paths):
996 992 self.paths = paths
997 993
998 994 def join(self, obj, fname):
999 995 """Used to compute the runtime path of a cached file.
1000 996
1001 997 Users should subclass filecache and provide their own version of this
1002 998 function to call the appropriate join function on 'obj' (an instance
1003 999 of the class whose member function was decorated).
1004 1000 """
1005 1001 raise NotImplementedError
1006 1002
1007 1003 def __call__(self, func):
1008 1004 self.func = func
1009 1005 self.name = func.__name__.encode('ascii')
1010 1006 return self
1011 1007
1012 1008 def __get__(self, obj, type=None):
1013 1009 # if accessed on the class, return the descriptor itself.
1014 1010 if obj is None:
1015 1011 return self
1016 1012 # do we need to check if the file changed?
1017 1013 if self.name in obj.__dict__:
1018 1014 assert self.name in obj._filecache, self.name
1019 1015 return obj.__dict__[self.name]
1020 1016
1021 1017 entry = obj._filecache.get(self.name)
1022 1018
1023 1019 if entry:
1024 1020 if entry.changed():
1025 1021 entry.obj = self.func(obj)
1026 1022 else:
1027 1023 paths = [self.join(obj, path) for path in self.paths]
1028 1024
1029 1025 # We stat -before- creating the object so our cache doesn't lie if
1030 1026 # a writer modified between the time we read and stat
1031 1027 entry = filecacheentry(paths, True)
1032 1028 entry.obj = self.func(obj)
1033 1029
1034 1030 obj._filecache[self.name] = entry
1035 1031
1036 1032 obj.__dict__[self.name] = entry.obj
1037 1033 return entry.obj
1038 1034
1039 1035 def __set__(self, obj, value):
1040 1036 if self.name not in obj._filecache:
1041 1037 # we add an entry for the missing value because X in __dict__
1042 1038 # implies X in _filecache
1043 1039 paths = [self.join(obj, path) for path in self.paths]
1044 1040 ce = filecacheentry(paths, False)
1045 1041 obj._filecache[self.name] = ce
1046 1042 else:
1047 1043 ce = obj._filecache[self.name]
1048 1044
1049 1045 ce.obj = value # update cached copy
1050 1046 obj.__dict__[self.name] = value # update copy returned by obj.x
1051 1047
1052 1048 def __delete__(self, obj):
1053 1049 try:
1054 1050 del obj.__dict__[self.name]
1055 1051 except KeyError:
1056 1052 raise AttributeError(self.name)
1057 1053
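A hedged sketch of the subclassing that join() asks for (this mirrors, approximately, the repofilecache used by localrepo; parsebookmarks and fakerepo are made up):

    from mercurial import scmutil

    class repofilecache(scmutil.filecache):
        def join(self, obj, fname):
            return obj.vfs.join(fname)      # resolve names against the repo's vfs

    class fakerepo(object):
        def __init__(self, vfs):
            self.vfs = vfs
            self._filecache = {}            # the descriptor stores entries here

        @repofilecache('bookmarks')
        def bookmarks(self):
            # recomputed only when the stat info of .hg/bookmarks changes
            return parsebookmarks(self.vfs)  # hypothetical parser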
1058 1054 def extdatasource(repo, source):
1059 1055 """Gather a map of rev -> value dict from the specified source
1060 1056
1061 1057 A source spec is treated as a URL, with a special case shell: type
1062 1058 for parsing the output from a shell command.
1063 1059
1064 1060 The data is parsed as a series of newline-separated records where
1065 1061 each record is a revision specifier optionally followed by a space
1066 1062 and a freeform string value. If the revision is known locally, it
1067 1063 is converted to a rev, otherwise the record is skipped.
1068 1064
1069 1065 Note that both key and value are treated as UTF-8 and converted to
1070 1066 the local encoding. This allows uniformity between local and
1071 1067 remote data sources.
1072 1068 """
1073 1069
1074 1070 spec = repo.ui.config("extdata", source)
1075 1071 if not spec:
1076 1072 raise error.Abort(_("unknown extdata source '%s'") % source)
1077 1073
1078 1074 data = {}
1079 1075 src = proc = None
1080 1076 try:
1081 1077 if spec.startswith("shell:"):
1082 1078 # external commands should be run relative to the repo root
1083 1079 cmd = spec[6:]
1084 1080 proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
1085 1081 close_fds=util.closefds,
1086 1082 stdout=subprocess.PIPE, cwd=repo.root)
1087 1083 src = proc.stdout
1088 1084 else:
1089 1085 # treat as a URL or file
1090 1086 src = url.open(repo.ui, spec)
1091 1087 for l in src:
1092 1088 if " " in l:
1093 1089 k, v = l.strip().split(" ", 1)
1094 1090 else:
1095 1091 k, v = l.strip(), ""
1096 1092
1097 1093 k = encoding.tolocal(k)
1098 1094 try:
1099 1095 data[repo[k].rev()] = encoding.tolocal(v)
1100 1096 except (error.LookupError, error.RepoLookupError):
1101 1097 pass # we ignore data for nodes that don't exist locally
1102 1098 finally:
1103 1099 if proc:
1104 1100 proc.communicate()
1105 1101 if src:
1106 1102 src.close()
1107 1103 if proc and proc.returncode != 0:
1108 1104 raise error.Abort(_("extdata command '%s' failed: %s")
1109 1105 % (cmd, util.explainexit(proc.returncode)[0]))
1110 1106
1111 1107 return data
1112 1108
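For illustration, a hedged [extdata] entry using the shell: form handled above (the command and file are made up; each output line is a revision specifier optionally followed by a space and a value):

    [extdata]
    releasenotes = shell:cat .hg/release-notes-map.txt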
1113 1109 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1114 1110 if lock is None:
1115 1111 raise error.LockInheritanceContractViolation(
1116 1112 'lock can only be inherited while held')
1117 1113 if environ is None:
1118 1114 environ = {}
1119 1115 with lock.inherit() as locker:
1120 1116 environ[envvar] = locker
1121 1117 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1122 1118
1123 1119 def wlocksub(repo, cmd, *args, **kwargs):
1124 1120 """run cmd as a subprocess that allows inheriting repo's wlock
1125 1121
1126 1122 This can only be called while the wlock is held. This takes all the
1127 1123 arguments that ui.system does, and returns the exit code of the
1128 1124 subprocess."""
1129 1125 return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
1130 1126 **kwargs)
1131 1127
1132 1128 def gdinitconfig(ui):
1133 1129 """helper function to know if a repo should be created as general delta
1134 1130 """
1135 1131 # experimental config: format.generaldelta
1136 1132 return (ui.configbool('format', 'generaldelta')
1137 1133 or ui.configbool('format', 'usegeneraldelta'))
1138 1134
1139 1135 def gddeltaconfig(ui):
1140 1136 """helper function to know if incoming delta should be optimised
1141 1137 """
1142 1138 # experimental config: format.generaldelta
1143 1139 return ui.configbool('format', 'generaldelta')
1144 1140
1145 1141 class simplekeyvaluefile(object):
1146 1142 """A simple file with key=value lines
1147 1143
1148 1144 Keys must be alphanumerics and start with a letter, values must not
1149 1145 contain '\n' characters"""
1150 1146 firstlinekey = '__firstline'
1151 1147
1152 1148 def __init__(self, vfs, path, keys=None):
1153 1149 self.vfs = vfs
1154 1150 self.path = path
1155 1151
1156 1152 def read(self, firstlinenonkeyval=False):
1157 1153 """Read the contents of a simple key-value file
1158 1154
1159 1155 'firstlinenonkeyval' indicates whether the first line of the file should
1160 1156 be treated as a key-value pair or returned fully under the
1161 1157 __firstline key."""
1162 1158 lines = self.vfs.readlines(self.path)
1163 1159 d = {}
1164 1160 if firstlinenonkeyval:
1165 1161 if not lines:
1166 1162 e = _("empty simplekeyvalue file")
1167 1163 raise error.CorruptedState(e)
1168 1164 # we don't want to include '\n' in the __firstline
1169 1165 d[self.firstlinekey] = lines[0][:-1]
1170 1166 del lines[0]
1171 1167
1172 1168 try:
1173 1169 # the 'if line.strip()' part prevents us from failing on empty
1174 1170 # lines which only contain '\n' therefore are not skipped
1175 1171 # by 'if line'
1176 1172 updatedict = dict(line[:-1].split('=', 1) for line in lines
1177 1173 if line.strip())
1178 1174 if self.firstlinekey in updatedict:
1179 1175 e = _("%r can't be used as a key")
1180 1176 raise error.CorruptedState(e % self.firstlinekey)
1181 1177 d.update(updatedict)
1182 1178 except ValueError as e:
1183 1179 raise error.CorruptedState(str(e))
1184 1180 return d
1185 1181
1186 1182 def write(self, data, firstline=None):
1187 1183 """Write key=>value mapping to a file
1188 1184 data is a dict. Keys must be alphanumerical and start with a letter.
1189 1185 Values must not contain newline characters.
1190 1186
1191 1187 If 'firstline' is not None, it is written to file before
1192 1188 everything else, as it is, not in a key=value form"""
1193 1189 lines = []
1194 1190 if firstline is not None:
1195 1191 lines.append('%s\n' % firstline)
1196 1192
1197 1193 for k, v in data.items():
1198 1194 if k == self.firstlinekey:
1199 1195 e = "key name '%s' is reserved" % self.firstlinekey
1200 1196 raise error.ProgrammingError(e)
1201 1197 if not k[0:1].isalpha():
1202 1198 e = "keys must start with a letter in a key-value file"
1203 1199 raise error.ProgrammingError(e)
1204 1200 if not k.isalnum():
1205 1201 e = "invalid key name in a simple key-value file"
1206 1202 raise error.ProgrammingError(e)
1207 1203 if '\n' in v:
1208 1204 e = "invalid value in a simple key-value file"
1209 1205 raise error.ProgrammingError(e)
1210 1206 lines.append("%s=%s\n" % (k, v))
1211 1207 with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
1212 1208 fp.write(''.join(lines))
1213 1209
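A hedged round-trip sketch (the vfs root is a placeholder and must be writable):

    from mercurial import scmutil, vfs as vfsmod

    v = vfsmod.vfs('/tmp/demo', audit=False)
    f = scmutil.simplekeyvaluefile(v, 'state')
    f.write({'version': '1', 'step': 'done'}, firstline='format-v1')
    data = f.read(firstlinenonkeyval=True)
    # data == {'__firstline': 'format-v1', 'version': '1', 'step': 'done'}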
1214 1210 _reportobsoletedsource = [
1215 1211 'debugobsolete',
1216 1212 'pull',
1217 1213 'push',
1218 1214 'serve',
1219 1215 'unbundle',
1220 1216 ]
1221 1217
1222 1218 _reportnewcssource = [
1223 1219 'pull',
1224 1220 'unbundle',
1225 1221 ]
1226 1222
1227 1223 # a list of (repo, ctx, files) functions called by various commands to allow
1228 1224 # extensions to ensure the corresponding files are available locally, before the
1229 1225 # command uses them.
1230 1226 fileprefetchhooks = util.hooks()
1231 1227
1232 1228 # A marker that tells the evolve extension to suppress its own reporting
1233 1229 _reportstroubledchangesets = True
1234 1230
1235 1231 def registersummarycallback(repo, otr, txnname=''):
1236 1232 """register a callback to issue a summary after the transaction is closed
1237 1233 """
1238 1234 def txmatch(sources):
1239 1235 return any(txnname.startswith(source) for source in sources)
1240 1236
1241 1237 categories = []
1242 1238
1243 1239 def reportsummary(func):
1244 1240 """decorator for report callbacks."""
1245 1241 # The repoview life cycle is shorter than the one of the actual
1246 1242 # underlying repository. So the filtered object can die before the
1247 1243 # weakref is used, leading to trouble. We keep a reference to the
1248 1244 # unfiltered object and restore the filtering when retrieving the
1249 1245 # repository through the weakref.
1250 1246 filtername = repo.filtername
1251 1247 reporef = weakref.ref(repo.unfiltered())
1252 1248 def wrapped(tr):
1253 1249 repo = reporef()
1254 1250 if filtername:
1255 1251 repo = repo.filtered(filtername)
1256 1252 func(repo, tr)
1257 1253 newcat = '%02i-txnreport' % len(categories)
1258 1254 otr.addpostclose(newcat, wrapped)
1259 1255 categories.append(newcat)
1260 1256 return wrapped
1261 1257
1262 1258 if txmatch(_reportobsoletedsource):
1263 1259 @reportsummary
1264 1260 def reportobsoleted(repo, tr):
1265 1261 obsoleted = obsutil.getobsoleted(repo, tr)
1266 1262 if obsoleted:
1267 1263 repo.ui.status(_('obsoleted %i changesets\n')
1268 1264 % len(obsoleted))
1269 1265
1270 1266 if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
1271 1267 repo.ui.configbool('experimental', 'evolution.report-instabilities')):
1272 1268 instabilitytypes = [
1273 1269 ('orphan', 'orphan'),
1274 1270 ('phase-divergent', 'phasedivergent'),
1275 1271 ('content-divergent', 'contentdivergent'),
1276 1272 ]
1277 1273
1278 1274 def getinstabilitycounts(repo):
1279 1275 filtered = repo.changelog.filteredrevs
1280 1276 counts = {}
1281 1277 for instability, revset in instabilitytypes:
1282 1278 counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
1283 1279 filtered)
1284 1280 return counts
1285 1281
1286 1282 oldinstabilitycounts = getinstabilitycounts(repo)
1287 1283 @reportsummary
1288 1284 def reportnewinstabilities(repo, tr):
1289 1285 newinstabilitycounts = getinstabilitycounts(repo)
1290 1286 for instability, revset in instabilitytypes:
1291 1287 delta = (newinstabilitycounts[instability] -
1292 1288 oldinstabilitycounts[instability])
1293 1289 if delta > 0:
1294 1290 repo.ui.warn(_('%i new %s changesets\n') %
1295 1291 (delta, instability))
1296 1292
1297 1293 if txmatch(_reportnewcssource):
1298 1294 @reportsummary
1299 1295 def reportnewcs(repo, tr):
1300 1296 """Report the range of new revisions pulled/unbundled."""
1301 1297 newrevs = tr.changes.get('revs', xrange(0, 0))
1302 1298 if not newrevs:
1303 1299 return
1304 1300
1305 1301 # Compute the bounds of new revisions' range, excluding obsoletes.
1306 1302 unfi = repo.unfiltered()
1307 1303 revs = unfi.revs('%ld and not obsolete()', newrevs)
1308 1304 if not revs:
1309 1305 # Got only obsoletes.
1310 1306 return
1311 1307 minrev, maxrev = repo[revs.min()], repo[revs.max()]
1312 1308
1313 1309 if minrev == maxrev:
1314 1310 revrange = minrev
1315 1311 else:
1316 1312 revrange = '%s:%s' % (minrev, maxrev)
1317 1313 repo.ui.status(_('new changesets %s\n') % revrange)
1318 1314
1319 1315 def nodesummaries(repo, nodes, maxnumnodes=4):
1320 1316 if len(nodes) <= maxnumnodes or repo.ui.verbose:
1321 1317 return ' '.join(short(h) for h in nodes)
1322 1318 first = ' '.join(short(h) for h in nodes[:maxnumnodes])
1323 1319 return _("%s and %d others") % (first, len(nodes) - maxnumnodes)
1324 1320
1325 1321 def enforcesinglehead(repo, tr, desc):
1326 1322 """check that no named branch has multiple heads"""
1327 1323 if desc in ('strip', 'repair'):
1328 1324 # skip the logic during strip
1329 1325 return
1330 1326 visible = repo.filtered('visible')
1331 1327 # possible improvement: we could restrict the check to affected branch
1332 1328 for name, heads in visible.branchmap().iteritems():
1333 1329 if len(heads) > 1:
1334 1330 msg = _('rejecting multiple heads on branch "%s"')
1335 1331 msg %= name
1336 1332 hint = _('%d heads: %s')
1337 1333 hint %= (len(heads), nodesummaries(repo, heads))
1338 1334 raise error.Abort(msg, hint=hint)
1339 1335
1340 1336 def wrapconvertsink(sink):
1341 1337 """Allow extensions to wrap the sink returned by convcmd.convertsink()
1342 1338 before it is used, whether or not the convert extension was formally loaded.
1343 1339 """
1344 1340 return sink
1345 1341
1346 1342 def unhidehashlikerevs(repo, specs, hiddentype):
1347 1343 """parse the user specs and unhide changesets whose hash or revision number
1348 1344 is passed.
1349 1345
1350 1346 hiddentype can be: 1) 'warn': warn while unhiding changesets
1351 1347 2) 'nowarn': don't warn while unhiding changesets
1352 1348
1353 1349 returns a repo object with the required changesets unhidden
1354 1350 """
1355 1351 if not repo.filtername or not repo.ui.configbool('experimental',
1356 1352 'directaccess'):
1357 1353 return repo
1358 1354
1359 1355 if repo.filtername not in ('visible', 'visible-hidden'):
1360 1356 return repo
1361 1357
1362 1358 symbols = set()
1363 1359 for spec in specs:
1364 1360 try:
1365 1361 tree = revsetlang.parse(spec)
1366 1362 except error.ParseError: # will be reported by scmutil.revrange()
1367 1363 continue
1368 1364
1369 1365 symbols.update(revsetlang.gethashlikesymbols(tree))
1370 1366
1371 1367 if not symbols:
1372 1368 return repo
1373 1369
1374 1370 revs = _getrevsfromsymbols(repo, symbols)
1375 1371
1376 1372 if not revs:
1377 1373 return repo
1378 1374
1379 1375 if hiddentype == 'warn':
1380 1376 unfi = repo.unfiltered()
1381 1377 revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
1382 1378 repo.ui.warn(_("warning: accessing hidden changesets for write "
1383 1379 "operation: %s\n") % revstr)
1384 1380
1385 1381 # we have to use a new filtername to separate the branch/tags caches until we
1386 1382 # can disable these caches when revisions are dynamically pinned.
1387 1383 return repo.filtered('visible-hidden', revs)
1388 1384
1389 1385 def _getrevsfromsymbols(repo, symbols):
1390 1386 """parse the list of symbols and returns a set of revision numbers of hidden
1391 1387 changesets present in symbols"""
1392 1388 revs = set()
1393 1389 unfi = repo.unfiltered()
1394 1390 unficl = unfi.changelog
1395 1391 cl = repo.changelog
1396 1392 tiprev = len(unficl)
1397 1393 pmatch = unficl._partialmatch
1398 1394 allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
1399 1395 for s in symbols:
1400 1396 try:
1401 1397 n = int(s)
1402 1398 if n <= tiprev:
1403 1399 if not allowrevnums:
1404 1400 continue
1405 1401 else:
1406 1402 if n not in cl:
1407 1403 revs.add(n)
1408 1404 continue
1409 1405 except ValueError:
1410 1406 pass
1411 1407
1412 1408 try:
1413 1409 s = pmatch(s)
1414 1410 except error.LookupError:
1415 1411 s = None
1416 1412
1417 1413 if s is not None:
1418 1414 rev = unficl.rev(s)
1419 1415 if rev not in cl:
1420 1416 revs.add(rev)
1421 1417
1422 1418 return revs
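For illustration, the experimental knobs this direct-access logic reads (hgrc snippet):

    [experimental]
    directaccess = True
    directaccess.revnums = True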