##// END OF EJS Templates
scmutil: fix requires-file isalnum() check on first byte...
Augie Fackler -
r36331:3f98634b default
parent child Browse files
Show More
@@ -1,1422 +1,1422 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import hashlib
13 13 import os
14 14 import re
15 15 import socket
16 16 import subprocess
17 17 import weakref
18 18
19 19 from .i18n import _
20 20 from .node import (
21 21 hex,
22 22 nullid,
23 23 short,
24 24 wdirid,
25 25 wdirrev,
26 26 )
27 27
28 28 from . import (
29 29 encoding,
30 30 error,
31 31 match as matchmod,
32 32 obsolete,
33 33 obsutil,
34 34 pathutil,
35 35 phases,
36 36 pycompat,
37 37 revsetlang,
38 38 similar,
39 39 url,
40 40 util,
41 41 vfs,
42 42 )
43 43
44 44 if pycompat.iswindows:
45 45 from . import scmwindows as scmplatform
46 46 else:
47 47 from . import scmposix as scmplatform
48 48
49 49 termsize = scmplatform.termsize
50 50
class status(tuple):
    '''Tuple subclass exposing one list of files per dirstate status.

    The 'deleted', 'unknown' and 'ignored' properties are only relevant
    to the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        fields = (modified, added, removed, deleted, unknown, ignored, clean)
        return tuple.__new__(cls, fields)

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        template = ('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                    'unknown=%r, ignored=%r, clean=%r>')
        return template % self
103 103
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Map each subpath to the context it should be read from, preferring
    # ctx1 when the subrepo exists in both. Subpaths only in ctx2 matter
    # when the .hgsub file has been modified (in ctx2) but not yet
    # committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    # subrepos present in ctx2 only
    missing = set(ctx2.substate) - set(ctx1.substate)
    for subpath in missing:
        del subpaths[subpath]

    for subpath, ctx in sorted(subpaths.items()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That
    # way, status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
128 128
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    # Collect excluded changesets that are secret and still alive: they
    # explain why nothing was exchanged even though the repos differ.
    secrets = []
    for node in (excluded or ()):
        ctx = repo[node]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secrets.append(node)

    if secrets:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secrets))
    else:
        ui.status(_("no changes found\n"))
145 145
def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.

    Returns -1 for handled errors unless a specific clause returns another
    code (InterventionRequired returns 1; SystemExit propagates its code).
    Unhandled exceptions are re-raised for the caller to deal with.
    """
    try:
        try:
            return func()
        except: # re-raises
            # log the traceback (if ui.traceback is enabled) before the
            # outer handlers turn the exception into a message + exit code
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        if inst.errno == errno.ETIMEDOUT:
            reason = _('timed out waiting for lock held by %r') % inst.locker
        else:
            reason = _('lock held by %r') % inst.locker
        ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason))
        if not inst.locker:
            ui.warn(_("(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        ui.warn(_("abort: could not lock %s: %s\n") %
                (inst.desc or inst.filename,
                 encoding.strtolocal(inst.strerror)))
    except error.OutOfBandError as inst:
        if inst.args:
            msg = _("abort: remote error:\n")
        else:
            msg = _("abort: remote error\n")
        ui.warn(msg)
        if inst.args:
            ui.warn(''.join(inst.args))
        if inst.hint:
            ui.warn('(%s)\n' % inst.hint)
    except error.RepoError as inst:
        ui.warn(_("abort: %s!\n") % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.warn(_("abort: %s") % inst.args[0])
        # NOTE: 'basestring' is a Python 2 builtin; this module predates
        # the py3 port
        if not isinstance(inst.args[1], basestring):
            ui.warn(" %r\n" % (inst.args[1],))
        elif not inst.args[1]:
            ui.warn(_(" empty string\n"))
        else:
            ui.warn("\n%r\n" % util.ellipsis(inst.args[1]))
    except error.CensoredNodeError as inst:
        ui.warn(_("abort: file censored %s!\n") % inst)
    except error.RevlogError as inst:
        ui.warn(_("abort: %s!\n") % inst)
    except error.InterventionRequired as inst:
        ui.warn("%s\n" % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
        # interactive intervention is not an error: distinct exit code
        return 1
    except error.WdirUnsupported:
        ui.warn(_("abort: working directory revision cannot be specified\n"))
    except error.Abort as inst:
        ui.warn(_("abort: %s\n") % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
    except ImportError as inst:
        ui.warn(_("abort: %s!\n") % inst)
        # last word of the message is the module that failed to import
        m = str(inst).split()[-1]
        if m in "mpatch bdiff".split():
            ui.warn(_("(did you forget to compile extensions?)\n"))
        elif m in "zlib".split():
            ui.warn(_("(is your Python install correct?)\n"))
    except IOError as inst:
        # urllib errors carry a 'code' attribute (HTTP status)
        if util.safehasattr(inst, "code"):
            ui.warn(_("abort: %s\n") % util.forcebytestr(inst))
        elif util.safehasattr(inst, "reason"):
            try: # usually it is in the form (errno, strerror)
                reason = inst.reason.args[1]
            except (AttributeError, IndexError):
                # it might be anything, for example a string
                reason = inst.reason
            if isinstance(reason, unicode):
                # SSLError of Python 2.7.9 contains a unicode
                reason = encoding.unitolocal(reason)
            ui.warn(_("abort: error: %s\n") % reason)
        elif (util.safehasattr(inst, "args")
              and inst.args and inst.args[0] == errno.EPIPE):
            # broken pipe (e.g. output piped to 'head') is not worth a message
            pass
        elif getattr(inst, "strerror", None):
            if getattr(inst, "filename", None):
                ui.warn(_("abort: %s: %s\n") % (
                    encoding.strtolocal(inst.strerror), inst.filename))
            else:
                ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
        else:
            raise
    except OSError as inst:
        if getattr(inst, "filename", None) is not None:
            ui.warn(_("abort: %s: '%s'\n") % (
                encoding.strtolocal(inst.strerror), inst.filename))
        else:
            ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
    except MemoryError:
        ui.warn(_("abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case catch this and and pass exit code to caller.
        return inst.code
    except socket.error as inst:
        ui.warn(_("abort: %s\n") % inst.args[-1])

    return -1
256 256
def checknewlabel(repo, lbl, kind):
    '''Abort if lbl is not usable as a new label (bookmark/branch/tag) name.'''
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ('tip', '.', 'null'):
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    for c in ':\0\n\r':
        if c in lbl:
            raise error.Abort(_("%r cannot be used in a name") % c)
    try:
        int(lbl)
    except ValueError:
        pass
    else:
        raise error.Abort(_("cannot use an integer as a name"))
    if lbl != lbl.strip():
        raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
272 272
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if any(c in f for c in '\r\n'):
        raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
277 277
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %s" % (msg, util.shellquote(f))
    if abort:
        raise error.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
289 289
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames

    Returns an (abort, warn) pair of booleans.'''
    value = ui.config('ui', 'portablefilenames')
    lowered = value.lower()
    asbool = util.parsebool(value)
    # Windows always aborts on non-portable names
    abort = pycompat.iswindows or lowered == 'abort'
    warn = asbool or lowered == 'warn'
    if asbool is None and not (warn or abort or lowered == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % value)
    return abort, warn
302 302
class casecollisionauditor(object):
    '''Detect additions whose name case-folds onto an already-tracked file.

    Depending on the 'abort' flag, a detected collision either raises
    error.Abort or emits a warning on the ui.'''

    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        # pre-compute the case-folded form of every tracked file
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # Files this auditor has already been called with; checking the
        # same name twice must not report a collision with itself.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        folded = encoding.lower(f)
        if folded in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(folded)
        self._newfiles.add(f)
326 326
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view up to maxrev
    and returns that SHA-1 digest, or None when nothing is filtered.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    hidden = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if not hidden:
        return None
    s = hashlib.sha1()
    for rev in hidden:
        s.update('%d;' % rev)
    return s.digest()
350 350
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # only propagate errors on the root path itself; errors deeper in
        # the walk are silently skipped by os.walk
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        # adddir records a directory's stat in dirlst and returns True if
        # it was not already there; stat identity (samestat) detects
        # symlink loops that plain path comparison would miss
        def adddir(dirlst, dirname):
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        # without samestat we cannot detect loops, so don't follow symlinks
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            # recurse through symlinked directories not seen before
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
398 398
def binnode(ctx):
    """Return binary node id for a given basectx"""
    node = ctx.node()
    # the working directory has no real node; use the magic wdir id
    return wdirid if node is None else node
405 405
def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    rev = ctx.rev()
    # the working directory has no real rev; use the magic wdir rev
    return wdirrev if rev is None else rev
413 413
def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    ui = ctx.repo().ui
    return formatrevnode(ui, intrev(ctx), binnode(ctx))
419 419
def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    # full hash in debug mode, short hash otherwise
    hexfunc = hex if ui.debugflag else short
    return '%d:%s' % (rev, hexfunc(node))
427 427
def revsingle(repo, revspec, default='.', localalias=None):
    '''Resolve a single revspec to a changectx, falling back to default.'''
    if not revspec and revspec != 0:
        return repo[default]

    revs = revrange(repo, [revspec], localalias=localalias)
    if not revs:
        raise error.Abort(_('empty revision set'))
    return repo[revs.last()]
436 436
def _pairspec(revspec):
    '''True if revspec's top-level operator is a range expression.'''
    tree = revsetlang.parse(revspec)
    if not tree:
        return tree
    return tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
440 440
def revpair(repo, revs):
    # Resolve a list of user revspecs into a (first, second) node pair.
    # second is None when the specs resolve to a single revision that was
    # not written as an explicit range.
    if not revs:
        # no specs: working directory's first parent, no second rev
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        # unordered smartset: fall back to positional first/last
        first = l.first()
        second = l.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        # e.g. "-r x -r y" where one side resolved to nothing
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)
470 470
def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    def _tospec(spec):
        # bare integers are treated as revision numbers
        if isinstance(spec, int):
            return revsetlang.formatspec('rev(%d)', spec)
        return spec

    allspecs = [_tospec(spec) for spec in specs]
    return repo.anyrevs(allspecs, user=True, localalias=localalias)
498 498
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        # a real merge: show both parents
        return parents
    if repo.ui.debugflag:
        # debug mode always shows both slots, padding with null
        return [parents[0], repo['null']]
    p1 = parents[0]
    if p1.rev() >= intrev(ctx) - 1:
        # parent is simply the previous revision: nothing worth showing
        return []
    return parents
514 514
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it already has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # explicit pattern kind (e.g. 're:', 'glob:'): leave untouched
            expanded.append(kindpat)
            continue
        try:
            globbed = glob.glob(pat)
        except re.error:
            globbed = [pat]
        if globbed:
            expanded.extend(globbed)
        else:
            # glob matched nothing: keep the literal pattern
            expanded.append(kindpat)
    return expanded
533 533
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    # default bad-file callback; note it closes over 'm', which is only
    # assigned below - valid because it is called after m exists
    def bad(f, msg):
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        # an always-matcher means no effective patterns were given
        pats = []
    return m, pats
558 558
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    m, _pats = matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)
    return m
563 563
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    root, cwd = repo.root, repo.getcwd()
    return matchmod.always(root, cwd)
567 567
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    root, cwd = repo.root, repo.getcwd()
    return matchmod.exact(root, cwd, files, badfn=badfn)
571 571
def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.

    Raises error.ParseError(msg) unless the pattern designates exactly one
    file in the given revision.
    """
    if not matchmod.patkind(pat):
        # a plain path: just normalize it
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    ctx = repo[rev]
    m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
    files = [f for f in ctx if m(f)]
    if len(files) != 1:
        raise error.ParseError(msg)
    return files[0]
585 585
def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified

    Returns the path (as a string) where the backup should be written.
    May remove conflicting files/directories under origbackuppath as a
    side effect.
    '''
    origbackuppath = ui.config('ui', 'origbackuppath')
    if not origbackuppath:
        return filepath + ".orig"

    # Convert filepath from an absolute path into a path inside the repo.
    filepathfromroot = util.normpath(os.path.relpath(filepath,
                                                     start=repo.root))

    origvfs = vfs.vfs(repo.wjoin(origbackuppath))
    origbackupdir = origvfs.dirname(filepathfromroot)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        for f in reversed(list(util.finddirs(filepathfromroot))):
            if origvfs.isfileorlink(f):
                ui.note(_('removing conflicting file: %s\n')
                        % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    if origvfs.isdir(filepathfromroot) and not origvfs.islink(filepathfromroot):
        # a directory is squatting on the backup file's own path
        ui.note(_('removing conflicting directory: %s\n')
                % origvfs.join(filepathfromroot))
        origvfs.rmtree(filepathfromroot, forcibly=True)

    return origvfs.join(filepathfromroot)
621 621
622 622 class _containsnode(object):
623 623 """proxy __contains__(node) to container.__contains__ which accepts revs"""
624 624
625 625 def __init__(self, repo, revcontainer):
626 626 self._torev = repo.changelog.rev
627 627 self._revcontains = revcontainer.__contains__
628 628
629 629 def __contains__(self, node):
630 630 return self._revcontains(self._torev(node))
631 631
def cleanupnodes(repo, replacements, operation, moves=None, metadata=None):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or a iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is dictionary containing metadata to be stored in obsmarker if
    obsolescence is enabled.
    """
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, 'items'):
        # a bare iterable of nodes means "no successors"
        replacements = {n: () for n in replacements}

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()
    for oldnode, newnodes in replacements.items():
        if oldnode in moves:
            # caller-provided move takes precedence
            continue
        if len(newnodes) > 1:
            # usually a split, take the one with biggest rev number
            newnode = next(unfi.set('max(%ln)', newnodes)).node()
        elif len(newnodes) == 0:
            # move bookmark backwards
            roots = list(unfi.set('max((::%n) - %ln)', oldnode,
                                  list(replacements)))
            if roots:
                newnode = roots[0].node()
            else:
                newnode = nullid
        else:
            newnode = newnodes[0]
        moves[oldnode] = newnode

    with repo.transaction('cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        allnewnodes = [n for ns in replacements.values() for n in ns]
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks # avoid import cycle
            repo.ui.debug('moving bookmarks %r from %s to %s\n' %
                          (oldbmarks, hex(oldnode), hex(newnode)))
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
                                   allnewnodes, newnode, oldnode)
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    # None means "delete this bookmark"
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obssolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the node in topology order, that might be useful for
            # some obsstore logic.
            # NOTE: the filtering and sorting might belong to createmarkers.
            isobs = unfi.obsstore.successors.__contains__
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0])
            rels = [(unfi[n], tuple(unfi[m] for m in s))
                    for n, s in sorted(replacements.items(), key=sortfunc)
                    if s or not isobs(n)]
            if rels:
                obsolete.createmarkers(repo, rels, operation=operation,
                                       metadata=metadata)
        else:
            from . import repair # avoid import cycle
            tostrip = list(replacements)
            if tostrip:
                repair.delayedstrip(repo.ui, repo, tostrip, operation)
724 724
def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
    # Add new files and remove missing ones, detecting renames by
    # similarity; recurses into subrepos. Returns 1 if any file was
    # rejected, 0 otherwise.
    if opts is None:
        opts = {}
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    # handle subrepos first so their results are reported before ours
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            try:
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    # collect files the matcher could not resolve; only explicitly named
    # ones count as failures (checked at the end)
    rejected = []
    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                    badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret
780 780
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    # The badfn closure appends into 'rejected', which is assigned on the
    # next line - valid because badfn only runs during the walk below.
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    # 1 signals that an explicitly named file could not be processed
    for f in rejected:
        if f in m.files():
            return 1
    return 0
809 809
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns a 5-tuple of lists: (added, unknown, deleted, removed, forgotten).
    '''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
                                unknown=True, ignored=False, full=False)
    # dirstate states: '?' untracked, 'r' marked removed, 'a' marked added;
    # st is the stat result, None when the file is gone from disk
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            # marked removed but present on disk again
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
838 838
839 839 def _findrenames(repo, matcher, added, removed, similarity):
840 840 '''Find renames from removed files to added ones.'''
841 841 renames = {}
842 842 if similarity > 0:
843 843 for old, new, score in similar.findrenames(repo, added, removed,
844 844 similarity):
845 845 if (repo.ui.verbose or not matcher.exact(old)
846 846 or not matcher.exact(new)):
847 847 repo.ui.status(_('recording removal of %s as rename to %s '
848 848 '(%d%% similar)\n') %
849 849 (matcher.rel(old), matcher.rel(new),
850 850 score * 100))
851 851 renames[new] = old
852 852 return renames
853 853
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    with repo.wlock():
        # order matters: forget removals before recording adds and copies
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in renames.items():
            wctx.copy(old, new)
863 863
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    # follow the copy chain back: if src itself was copied, credit the origin
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        # 'm' merged / 'n' normal; anything else needs a state refresh
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # the source is only added, not committed: no copy data possible
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            # '?' untracked / 'r' removed: dst must be (re)added
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
882 882
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.

    Returns the set of requirement names. Raises RequirementError if the
    file is corrupt or names features this Mercurial does not support.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r not in supported:
            # Use a one-byte slice, not an index: on Python 3 indexing a
            # bytes object yields an int (which has no isalnum()), while
            # slicing yields bytes on both Python 2 and 3.
            if not r or not r[0:1].isalnum():
                raise error.RequirementError(_(".hg/requires file is corrupt"))
            missings.append(r)
    missings.sort()
    if missings:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(missings),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements
901 901
def writerequires(opener, requirements):
    """Write the requirement names, one per line in sorted order."""
    with opener('requires', 'w') as fp:
        fp.write(''.join("%s\n" % name for name in sorted(requirements)))
906 906
class filecachesubentry(object):
    """Tracks whether a single file has changed, using cached stat data.

    _cacheable is a tri-state: True/False once known, None while the
    filesystem's ability to detect replacement is still undetermined."""

    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        # None means we don't know yet whether stat data is usable
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)
            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()

    def refresh(self):
        # re-stat only if stat data can be trusted for this file at all
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        # unknown is treated optimistically as cacheable for now
        if self._cacheable is None:
            return True
        return self._cacheable

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat == newstat:
            return False
        self.cachestat = newstat
        return True

    @staticmethod
    def stat(path):
        # a missing file is reported as "no stat" rather than an error
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
961 961
class filecacheentry(object):
    """Aggregates several filecachesubentry objects into one cache entry."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(path, stat) for path in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(sub.changed() for sub in self._entries)

    def refresh(self):
        for sub in self._entries:
            sub.refresh()
978 978
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behavior as
    propertycache).

    '''
    def __init__(self, *paths):
        # relative path names of the files to watch; resolved via join()
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        # decorator entry point: remember the wrapped function and its name
        self.func = func
        self.name = func.__name__.encode('ascii')
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # do we need to check if the file changed?
        # (a value cached in __dict__ is served without re-statting)
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            # recompute only when one of the tracked files changed
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        # dropping the __dict__ copy forces recomputation on next access
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
1057 1057
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config("extdata", source)
    if not spec:
        raise error.Abort(_("unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith("shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                                    close_fds=util.closefds,
                                    stdout=subprocess.PIPE, cwd=repo.root)
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            # each record is "<revspec>[ <value>]"
            if " " in l:
                k, v = l.strip().split(" ", 1)
            else:
                k, v = l.strip(), ""

            k = encoding.tolocal(k)
            try:
                data[repo[k].rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass # we ignore data for nodes that don't exist locally
    finally:
        # always reap the child process / close the stream, even on error
        if proc:
            proc.communicate()
        if src:
            src.close()
    # only report a command failure after cleanup has completed
    if proc and proc.returncode != 0:
        raise error.Abort(_("extdata command '%s' failed: %s")
                          % (cmd, util.explainexit(proc.returncode)[0]))

    return data
1112 1112
1113 1113 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1114 1114 if lock is None:
1115 1115 raise error.LockInheritanceContractViolation(
1116 1116 'lock can only be inherited while held')
1117 1117 if environ is None:
1118 1118 environ = {}
1119 1119 with lock.inherit() as locker:
1120 1120 environ[envvar] = locker
1121 1121 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1122 1122
def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    wlock = repo.currentwlock()
    return _locksub(repo, wlock, 'HG_WLOCK_LOCKER', cmd, *args, **kwargs)
1131 1131
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    explicit = ui.configbool('format', 'generaldelta')
    return explicit or ui.configbool('format', 'usegeneraldelta')
1138 1138
def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    value = ui.configbool('format', 'generaldelta')
    return value
1144 1144
class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""
    # reserved key under which read() returns the raw first line
    firstlinekey = '__firstline'

    def __init__(self, vfs, path, keys=None):
        # NOTE(review): 'keys' is accepted but never used here — presumably
        # kept for interface compatibility; confirm against callers
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of file should
        be treated as a key-value pair or returned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                e = _("empty simplekeyvalue file")
                raise error.CorruptedState(e)
            # we don't want to include '\n' in the __firstline
            d[self.firstlinekey] = lines[0][:-1]
            del lines[0]

        try:
            # the 'if line.strip()' part prevents us from failing on empty
            # lines which only contain '\n' therefore are not skipped
            # by 'if line'
            updatedict = dict(line[:-1].split('=', 1) for line in lines
                              if line.strip())
            if self.firstlinekey in updatedict:
                e = _("%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            d.update(updatedict)
        except ValueError as e:
            # a line without '=' makes the dict() comprehension blow up
            raise error.CorruptedState(str(e))
        return d

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file
        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to file before
        everything else, as it is, not in a key=value form"""
        lines = []
        if firstline is not None:
            lines.append('%s\n' % firstline)

        for k, v in data.items():
            if k == self.firstlinekey:
                e = "key name '%s' is reserved" % self.firstlinekey
                raise error.ProgrammingError(e)
            # slice (not index) so empty keys and py3 bytes behave alike
            if not k[0:1].isalpha():
                e = "keys must start with a letter in a key-value file"
                raise error.ProgrammingError(e)
            if not k.isalnum():
                e = "invalid key name in a simple key-value file"
                raise error.ProgrammingError(e)
            if '\n' in v:
                e = "invalid value in a simple key-value file"
                raise error.ProgrammingError(e)
            lines.append("%s=%s\n" % (k, v))
        with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
            fp.write(''.join(lines))
1213 1213
# transaction-name prefixes after which obsoleted changesets are summarized
_reportobsoletedsource = [
    'debugobsolete',
    'pull',
    'push',
    'serve',
    'unbundle',
]

# transaction-name prefixes after which newly added changesets are summarized
_reportnewcssource = [
    'pull',
    'unbundle',
]

# a list of (repo, ctx, files) functions called by various commands to allow
# extensions to ensure the corresponding files are available locally, before the
# command uses them.
fileprefetchhooks = util.hooks()

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True
1234 1234
def registersummarycallback(repo, otr, txnname=''):
    """register a callback to issue a summary after the transaction is closed

    otr is the transaction object; txnname selects which summaries apply.
    """
    def txmatch(sources):
        # does this transaction's name start with any of the given prefixes?
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used leading to troubles. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())
        def wrapped(tr):
            repo = reporef()
            if filtername:
                repo = repo.filtered(filtername)
            func(repo, tr)
        # number the categories so callbacks run in registration order
        newcat = '%02i-txnreport' % len(categories)
        otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    if txmatch(_reportobsoletedsource):
        @reportsummary
        def reportobsoleted(repo, tr):
            # summarize how many changesets this transaction obsoleted
            obsoleted = obsutil.getobsoleted(repo, tr)
            if obsoleted:
                repo.ui.status(_('obsoleted %i changesets\n')
                               % len(obsoleted))

    if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
        repo.ui.configbool('experimental', 'evolution.report-instabilities')):
        # (display name, revset name) pairs for each instability kind
        instabilitytypes = [
            ('orphan', 'orphan'),
            ('phase-divergent', 'phasedivergent'),
            ('content-divergent', 'contentdivergent'),
        ]

        def getinstabilitycounts(repo):
            # count unstable revisions per kind, ignoring filtered revs
            filtered = repo.changelog.filteredrevs
            counts = {}
            for instability, revset in instabilitytypes:
                counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
                                          filtered)
            return counts

        # snapshot counts now so the callback can report only the delta
        oldinstabilitycounts = getinstabilitycounts(repo)
        @reportsummary
        def reportnewinstabilities(repo, tr):
            newinstabilitycounts = getinstabilitycounts(repo)
            for instability, revset in instabilitytypes:
                delta = (newinstabilitycounts[instability] -
                         oldinstabilitycounts[instability])
                if delta > 0:
                    repo.ui.warn(_('%i new %s changesets\n') %
                                 (delta, instability))

    if txmatch(_reportnewcssource):
        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            newrevs = tr.changes.get('revs', xrange(0, 0))
            if not newrevs:
                return

            # Compute the bounds of new revisions' range, excluding obsoletes.
            unfi = repo.unfiltered()
            revs = unfi.revs('%ld and not obsolete()', newrevs)
            if not revs:
                # Got only obsoletes.
                return
            minrev, maxrev = repo[revs.min()], repo[revs.max()]

            if minrev == maxrev:
                revrange = minrev
            else:
                revrange = '%s:%s' % (minrev, maxrev)
            repo.ui.status(_('new changesets %s\n') % revrange)
1318 1318
def nodesummaries(repo, nodes, maxnumnodes=4):
    """Return a short textual summary of nodes, abbreviating long lists."""
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return ' '.join(short(h) for h in nodes)
    shown = ' '.join(short(h) for h in nodes[:maxnumnodes])
    return _("%s and %d others") % (shown, len(nodes) - maxnumnodes)
1324 1324
def enforcesinglehead(repo, tr, desc):
    """check that no named branch has multiple heads"""
    if desc in ('strip', 'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered('visible')
    # possible improvement: we could restrict the check to affected branch
    for name, heads in visible.branchmap().iteritems():
        if len(heads) <= 1:
            continue
        msg = _('rejecting multiple heads on branch "%s"') % name
        hint = _('%d heads: %s') % (len(heads), nodesummaries(repo, heads))
        raise error.Abort(msg, hint=hint)
1339 1339
def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.
    """
    # default implementation is the identity; extensions replace this hook
    return sink
1345 1345
def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    # direct access to hidden changesets is an opt-in experimental feature
    if not repo.filtername or not repo.ui.configbool('experimental',
                                                     'directaccess'):
        return repo

    if repo.filtername not in ('visible', 'visible-hidden'):
        return repo

    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError: # will be reported by scmutil.revrange()
            continue

        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)

    if not revs:
        return repo

    if hiddentype == 'warn':
        unfi = repo.unfiltered()
        revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
        repo.ui.warn(_("warning: accessing hidden changesets for write "
                       "operation: %s\n") % revstr)

    # we have to use new filtername to separate branch/tags cache until we can
    # disable these caches when revisions are dynamically pinned.
    return repo.filtered('visible-hidden', revs)
1388 1388
def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and returns a set of revision numbers of hidden
    changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    pmatch = unficl._partialmatch
    allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
    for s in symbols:
        # first, try to interpret the symbol as a plain revision number
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    # hidden == exists unfiltered but absent from the
                    # filtered changelog
                    if n not in cl:
                        revs.add(n)
                    continue
        except ValueError:
            pass

        # otherwise treat it as a (possibly partial) node hash
        try:
            s = pmatch(s)
        except error.LookupError:
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs
General Comments 0
You need to be logged in to leave comments. Login now