##// END OF EJS Templates
shortest: cache disambiguation revset...
Martin von Zweigbergk -
r38889:3588e41f default
parent child Browse files
Show More
@@ -1,1733 +1,1743 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import hashlib
13 13 import os
14 14 import re
15 15 import socket
16 16 import subprocess
17 17 import weakref
18 18
19 19 from .i18n import _
20 20 from .node import (
21 21 bin,
22 22 hex,
23 23 nullid,
24 24 short,
25 25 wdirid,
26 26 wdirrev,
27 27 )
28 28
29 29 from . import (
30 30 encoding,
31 31 error,
32 32 match as matchmod,
33 33 obsolete,
34 34 obsutil,
35 35 pathutil,
36 36 phases,
37 37 pycompat,
38 38 revsetlang,
39 39 similar,
40 40 url,
41 41 util,
42 42 vfs,
43 43 )
44 44
45 45 from .utils import (
46 46 procutil,
47 47 stringutil,
48 48 )
49 49
50 50 if pycompat.iswindows:
51 51 from . import scmwindows as scmplatform
52 52 else:
53 53 from . import scmposix as scmplatform
54 54
55 55 termsize = scmplatform.termsize
56 56
class status(tuple):
    '''Immutable 7-tuple with one list of files per status category.

    The 'deleted', 'unknown' and 'ignored' properties are only relevant
    to the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        fields = (modified, added, removed, deleted, unknown, ignored, clean)
        return tuple.__new__(cls, fields)

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        fmt = (r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
               r'unknown=%s, ignored=%s, clean=%s>')
        return fmt % tuple(pycompat.sysstr(stringutil.pprint(field))
                           for field in self)
110 110
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2

    Yields (subpath, subrepo) pairs, preferring the subrepo state from
    ctx1 when a path exists in both contexts.
    """
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = set()

    # Subrepos present only in ctx2 cannot be resolved against ctx1.
    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
135 135
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    if excluded:
        for n in excluded:
            ctx = repo[n]
            # Count live (non-extinct) secret changesets so the message can
            # tell the user why nothing was exchanged.
            if ctx.phase() >= phases.secret and not ctx.extinct():
                secretlist.append(n)

    if secretlist:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
    else:
        ui.status(_("no changes found\n"))
152 152
def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.
    """
    try:
        try:
            return func()
        except: # re-raises
            # Record the traceback (if enabled) before the outer handlers
            # translate the exception into a message and exit code.
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        if inst.errno == errno.ETIMEDOUT:
            reason = _('timed out waiting for lock held by %r') % inst.locker
        else:
            reason = _('lock held by %r') % inst.locker
        ui.error(_("abort: %s: %s\n") % (
            inst.desc or stringutil.forcebytestr(inst.filename), reason))
        if not inst.locker:
            ui.error(_("(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        ui.error(_("abort: could not lock %s: %s\n") %
                 (inst.desc or stringutil.forcebytestr(inst.filename),
                  encoding.strtolocal(inst.strerror)))
    except error.OutOfBandError as inst:
        if inst.args:
            msg = _("abort: remote error:\n")
        else:
            msg = _("abort: remote error\n")
        ui.error(msg)
        if inst.args:
            ui.error(''.join(inst.args))
        if inst.hint:
            ui.error('(%s)\n' % inst.hint)
    except error.RepoError as inst:
        ui.error(_("abort: %s!\n") % inst)
        if inst.hint:
            ui.error(_("(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.error(_("abort: %s") % inst.args[0])
        msg = inst.args[1]
        if isinstance(msg, type(u'')):
            msg = pycompat.sysbytes(msg)
        if not isinstance(msg, bytes):
            ui.error(" %r\n" % (msg,))
        elif not msg:
            ui.error(_(" empty string\n"))
        else:
            ui.error("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
    except error.CensoredNodeError as inst:
        ui.error(_("abort: file censored %s!\n") % inst)
    except error.RevlogError as inst:
        ui.error(_("abort: %s!\n") % inst)
    except error.InterventionRequired as inst:
        ui.error("%s\n" % inst)
        if inst.hint:
            ui.error(_("(%s)\n") % inst.hint)
        # "intervention required" is an expected stop, not a failure:
        # exit 1 instead of the generic -1.
        return 1
    except error.WdirUnsupported:
        ui.error(_("abort: working directory revision cannot be specified\n"))
    except error.Abort as inst:
        ui.error(_("abort: %s\n") % inst)
        if inst.hint:
            ui.error(_("(%s)\n") % inst.hint)
    except ImportError as inst:
        ui.error(_("abort: %s!\n") % stringutil.forcebytestr(inst))
        m = stringutil.forcebytestr(inst).split()[-1]
        if m in "mpatch bdiff".split():
            ui.error(_("(did you forget to compile extensions?)\n"))
        elif m in "zlib".split():
            ui.error(_("(is your Python install correct?)\n"))
    except IOError as inst:
        # NOTE(review): the "code"/"reason" attribute checks presumably
        # distinguish urllib HTTP/URL errors — verify against callers.
        if util.safehasattr(inst, "code"):
            ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst))
        elif util.safehasattr(inst, "reason"):
            try: # usually it is in the form (errno, strerror)
                reason = inst.reason.args[1]
            except (AttributeError, IndexError):
                # it might be anything, for example a string
                reason = inst.reason
            if isinstance(reason, pycompat.unicode):
                # SSLError of Python 2.7.9 contains a unicode
                reason = encoding.unitolocal(reason)
            ui.error(_("abort: error: %s\n") % reason)
        elif (util.safehasattr(inst, "args")
              and inst.args and inst.args[0] == errno.EPIPE):
            # Broken pipe (e.g. output piped to a pager that exited) is
            # deliberately silent.
            pass
        elif getattr(inst, "strerror", None):
            if getattr(inst, "filename", None):
                ui.error(_("abort: %s: %s\n") % (
                    encoding.strtolocal(inst.strerror),
                    stringutil.forcebytestr(inst.filename)))
            else:
                ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
        else:
            raise
    except OSError as inst:
        if getattr(inst, "filename", None) is not None:
            ui.error(_("abort: %s: '%s'\n") % (
                encoding.strtolocal(inst.strerror),
                stringutil.forcebytestr(inst.filename)))
        else:
            ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
    except MemoryError:
        ui.error(_("abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case catch this and and pass exit code to caller.
        return inst.code
    except socket.error as inst:
        ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))

    return -1
269 269
def checknewlabel(repo, lbl, kind):
    '''Validate a proposed new label name; raise error.Abort if invalid.

    "kind" is accepted for callers' use but deliberately not interpolated
    into messages (see comment below).
    '''
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ['tip', '.', 'null']:
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    for c in (':', '\0', '\n', '\r'):
        if c in lbl:
            raise error.Abort(
                _("%r cannot be used in a name") % pycompat.bytestr(c))
    try:
        int(lbl)
        # A purely numeric name would be ambiguous with revision numbers.
        raise error.Abort(_("cannot use an integer as a name"))
    except ValueError:
        pass
    if lbl.strip() != lbl:
        raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
286 286
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    for forbidden in ('\r', '\n'):
        if forbidden in f:
            raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
                              % pycompat.bytestr(f))
292 292
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    # Hard requirement first: no \n or \r regardless of configuration.
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if abort or warn:
        msg = util.checkwinfilename(f)
        if msg:
            msg = "%s: %s" % (msg, procutil.shellquote(f))
            if abort:
                raise error.Abort(msg)
            ui.warn(_("warning: %s\n") % msg)
304 304
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames

    Returns an (abort, warn) boolean pair derived from
    ui.portablefilenames; raises error.ConfigError on unknown values.
    '''
    val = ui.config('ui', 'portablefilenames')
    lval = val.lower()
    bval = stringutil.parsebool(val)
    # On Windows the abort behavior is forced regardless of configuration.
    abort = pycompat.iswindows or lval == 'abort'
    warn = bval or lval == 'warn'
    if bval is None and not (warn or abort or lval == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
317 317
class casecollisionauditor(object):
    '''Callable that flags files whose names collide case-insensitively.

    Each call with a filename either raises error.Abort (when "abort" is
    set) or warns via ui when the lowercased name collides with an
    already-seen or dirstate-tracked name.
    '''
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        # Join all tracked names with NUL and lowercase them in a single
        # encoding.lower() call, then split back into a set.
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)
341 341
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.

    Returns None when nothing is filtered at all, or when nothing at or
    below maxrev is filtered.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    key = None
    # Sort so the digest is deterministic regardless of set iteration order.
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if revs:
        s = hashlib.sha1()
        for rev in revs:
            s.update('%d;' % rev)
        key = s.digest()
    return key
365 365
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # Only errors on the root path itself abort the walk; deeper
        # errors are silently skipped by os.walk.
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # Track each visited directory's stat; return False when it was
            # already seen (protects against symlink cycles).
            dirstat = os.stat(dirname)
            match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        # Without samestat we cannot detect symlink cycles, so following
        # symlinks would risk infinite recursion; disable it.
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # Recurse through the link target manually since
                        # os.walk(topdown) does not follow it for us here.
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
409 409
def binnode(ctx):
    """Return binary node id for a given basectx"""
    n = ctx.node()
    if n is not None:
        return n
    # A None node means the working directory; use its sentinel id.
    return wdirid
416 416
def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    r = ctx.rev()
    if r is not None:
        return r
    # The working directory has no revision number; use its sentinel rev.
    return wdirrev
424 424
def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    repo = ctx.repo()
    # Delegate to formatrevnode() with the wdir-safe rev/node accessors.
    return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
430 430
def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    # Full 40-char hashes with --debug, 12-char short form otherwise.
    hexfunc = hex if ui.debugflag else short
    return '%d:%s' % (rev, hexfunc(node))
438 438
def resolvehexnodeidprefix(repo, prefix):
    '''Resolve a (possibly abbreviated) hex nodeid prefix to a binary node.

    Returns None when nothing matches. On an ambiguous prefix, tries the
    experimental.revisions.disambiguatewithin revset to narrow candidates
    down to one; re-raises the ambiguity error otherwise.
    '''
    try:
        # Uses unfiltered repo because it's faster when prefix is ambiguous.
        # This matches the shortesthexnodeidprefix() function below.
        node = repo.unfiltered().changelog._partialmatch(prefix)
    except error.AmbiguousPrefixLookupError:
        revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
        if revset:
            # Clear config to avoid infinite recursion
            configoverrides = {('experimental',
                                'revisions.disambiguatewithin'): None}
            with repo.ui.configoverride(configoverrides):
                revs = repo.anyrevs([revset], user=True)
                matches = []
                for rev in revs:
                    node = repo.changelog.node(rev)
                    if hex(node).startswith(prefix):
                        matches.append(node)
                if len(matches) == 1:
                    return matches[0]
        raise
    if node is None:
        return
    repo.changelog.rev(node) # make sure node isn't filtered
    return node
464 464
def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
    """Find the shortest unambiguous prefix that matches hexnode.

    If "cache" is not None, it must be a dictionary that can be used for
    caching between calls to this method.

    Raises error.RepoLookupError when the node is unknown.
    """
    # This span was a scraped diff interleaving the pre- and post-change
    # versions of the function; the coherent post-change version (with the
    # "cache" parameter and the cached disambiguation revset) is kept.
    #
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. so we look for hash collision in
    # unfiltered space, which means some hashes may be slightly longer.
    cl = repo.unfiltered().changelog

    def isrev(prefix):
        # True when "prefix" would be parsed as a valid revision number.
        try:
            i = int(prefix)
            # if we are a pure int, then starting with zero will not be
            # confused as a rev; or, obviously, if the int is larger
            # than the value of the tip rev
            if prefix[0:1] == b'0' or i > len(cl):
                return False
            return True
        except ValueError:
            return False

    def disambiguate(prefix):
        """Disambiguate against revnums."""
        hexnode = hex(node)
        # Lengthen until the prefix no longer parses as a revision number.
        for length in range(len(prefix), len(hexnode) + 1):
            prefix = hexnode[:length]
            if not isrev(prefix):
                return prefix

    revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
    if revset:
        # Compute the disambiguation revset once per "cache" dict, since
        # evaluating it for every node would be expensive.
        revs = None
        if cache is not None:
            revs = cache.get('disambiguationrevset')
        if revs is None:
            revs = repo.anyrevs([revset], user=True)
            if cache is not None:
                cache['disambiguationrevset'] = revs
        if cl.rev(node) in revs:
            hexnode = hex(node)
            # Find the shortest prefix unique within the revset only.
            for length in range(minlength, len(hexnode) + 1):
                matches = []
                prefix = hexnode[:length]
                for rev in revs:
                    otherhexnode = repo[rev].hex()
                    if prefix == otherhexnode[:length]:
                        matches.append(otherhexnode)
                if len(matches) == 1:
                    return disambiguate(prefix)

    try:
        return disambiguate(cl.shortest(node, minlength))
    except error.LookupError:
        raise error.RepoLookupError()
511 521
def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
    symbol is an ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
    except error.RepoLookupError:
        return False
    return True
523 533
def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        msg = ("symbol (%s of type %s) was not a string, did you mean "
               "repo[symbol]?" % (symbol, type(symbol)))
        raise error.ProgrammingError(msg)
    try:
        if symbol in ('.', 'tip', 'null'):
            return repo[symbol]

        # 1. Try the symbol as a decimal revision number (possibly negative).
        try:
            r = int(symbol)
            if '%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        # 2. Try the symbol as a full 40-char hex node id.
        if len(symbol) == 40:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # 3. look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        # 4. Finally try the symbol as an abbreviated node id prefix.
        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_("unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        return repo[None]
    except (error.FilteredIndexError, error.FilteredLookupError,
            error.FilteredRepoLookupError):
        # Re-raise with a user-facing explanation of why the revision is
        # hidden/filtered.
        raise _filterederror(repo, symbol)
584 594
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith('visible'):

        # Check if the changeset is obsolete
        unfilteredrepo = repo.unfiltered()
        ctx = revsymbol(unfilteredrepo, changeid)

        # If the changeset is obsolete, enrich the message with the reason
        # that made this changeset not visible
        if ctx.obsolete():
            msg = obsutil._getfilteredreason(repo, changeid, ctx)
        else:
            msg = _("hidden revision '%s'") % changeid

        hint = _('use --hidden to access hidden revisions')

        return error.FilteredRepoLookupError(msg, hint=hint)
    # Non-"visible" filters (e.g. narrower repoviews) get a generic message.
    msg = _("filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)
609 619
def revsingle(repo, revspec, default='.', localalias=None):
    '''Resolve revspec to a single changectx, using default when revspec
    is empty (the integer 0 is a valid revision, not "empty").'''
    if not revspec and revspec != 0:
        return repo[default]

    l = revrange(repo, [revspec], localalias=localalias)
    if not l:
        raise error.Abort(_('empty revision set'))
    # When the revset yields several revisions, the last one wins.
    return repo[l.last()]
618 628
def _pairspec(revspec):
    '''Report whether the top of the parsed revspec is a range operator.'''
    tree = revsetlang.parse(revspec)
    if not tree:
        return tree
    return tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
622 632
def revpair(repo, revs):
    '''Resolve a list of revision specs to a (first, second) context pair.

    With no specs, returns (working directory parent, working directory).
    '''
    if not revs:
        return repo['.'], repo[None]

    l = revrange(repo, revs)

    # Pick the endpoints cheaply when the smartset knows its ordering.
    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        first = l.first()
        second = l.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]
652 662
def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    # Integers are wrapped in rev(%d); everything else is assumed to be a
    # pre-formatted revset string.
    allspecs = [revsetlang.formatspec('rev(%d)', spec)
                if isinstance(spec, int) else spec
                for spec in specs]
    return repo.anyrevs(allspecs, user=True, localalias=localalias)
680 690
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        # In debug mode always show the parent, padded with null.
        return [parents[0], repo['null']]
    # A parent that immediately precedes ctx is implied; hide it.
    if parents[0].rev() >= intrev(ctx) - 1:
        return []
    return parents
696 706
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it already has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # Explicitly-kinded patterns are passed through untouched.
            expanded.append(kindpat)
            continue
        try:
            matches = glob.glob(pat)
        except re.error:
            matches = [pat]
        # Keep the original pattern when the glob matched nothing.
        expanded.extend(matches if matches else [kindpat])
    return expanded
715 725
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    def bad(f, msg):
        # Note: "m" is bound below; closure relies on late binding.
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    # An always-matcher needs no patterns; report none to the caller.
    if m.always():
        pats = []
    return m, pats
740 750
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    # Same as matchandpats(), dropping the normalized pattern list.
    m, _pats = matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)
    return m
745 755
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always(repo.root, repo.getcwd())
749 759
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
753 763
def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.

    Raises error.ParseError(msg) unless the pattern resolves to exactly
    one file in the given revision.
    """
    if not matchmod.patkind(pat):
        # Plain path: just canonicalize it.
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    else:
        ctx = repo[rev]
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
        files = [f for f in ctx if m(f)]
        if len(files) != 1:
            raise error.ParseError(msg)
        return files[0]
767 777
def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified
    '''
    origbackuppath = ui.config('ui', 'origbackuppath')
    if not origbackuppath:
        return filepath + ".orig"

    # Convert filepath from an absolute path into a path inside the repo.
    filepathfromroot = util.normpath(os.path.relpath(filepath,
                                                     start=repo.root))

    origvfs = vfs.vfs(repo.wjoin(origbackuppath))
    origbackupdir = origvfs.dirname(filepathfromroot)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        for f in reversed(list(util.finddirs(filepathfromroot))):
            if origvfs.isfileorlink(f):
                ui.note(_('removing conflicting file: %s\n')
                        % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    # A directory (that is not a symlink) in the backup file's place must
    # give way to the file backup.
    if origvfs.isdir(filepathfromroot) and not origvfs.islink(filepathfromroot):
        ui.note(_('removing conflicting directory: %s\n')
                % origvfs.join(filepathfromroot))
        origvfs.rmtree(filepathfromroot, forcibly=True)

    return origvfs.join(filepathfromroot)
803 813
class _containsnode(object):
    """proxy __contains__(node) to container.__contains__ which accepts revs"""

    def __init__(self, repo, revcontainer):
        # Bind the changelog's node->rev lookup and the container's
        # membership test once, so each __contains__ is two calls.
        self._torev = repo.changelog.rev
        self._revcontains = revcontainer.__contains__

    def __contains__(self, node):
        return self._revcontains(self._torev(node))
813 823
814 824 def cleanupnodes(repo, replacements, operation, moves=None, metadata=None,
815 825 fixphase=False, targetphase=None, backup=True):
816 826 """do common cleanups when old nodes are replaced by new nodes
817 827
818 828 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
819 829 (we might also want to move working directory parent in the future)
820 830
821 831 By default, bookmark moves are calculated automatically from 'replacements',
822 832 but 'moves' can be used to override that. Also, 'moves' may include
823 833 additional bookmark moves that should not have associated obsmarkers.
824 834
825 835 replacements is {oldnode: [newnode]} or a iterable of nodes if they do not
826 836 have replacements. operation is a string, like "rebase".
827 837
828 838 metadata is dictionary containing metadata to be stored in obsmarker if
829 839 obsolescence is enabled.
830 840 """
831 841 assert fixphase or targetphase is None
832 842 if not replacements and not moves:
833 843 return
834 844
835 845 # translate mapping's other forms
836 846 if not util.safehasattr(replacements, 'items'):
837 847 replacements = {n: () for n in replacements}
838 848
839 849 # Calculate bookmark movements
840 850 if moves is None:
841 851 moves = {}
842 852 # Unfiltered repo is needed since nodes in replacements might be hidden.
843 853 unfi = repo.unfiltered()
844 854 for oldnode, newnodes in replacements.items():
845 855 if oldnode in moves:
846 856 continue
847 857 if len(newnodes) > 1:
848 858 # usually a split, take the one with biggest rev number
849 859 newnode = next(unfi.set('max(%ln)', newnodes)).node()
850 860 elif len(newnodes) == 0:
851 861 # move bookmark backwards
852 862 roots = list(unfi.set('max((::%n) - %ln)', oldnode,
853 863 list(replacements)))
854 864 if roots:
855 865 newnode = roots[0].node()
856 866 else:
857 867 newnode = nullid
858 868 else:
859 869 newnode = newnodes[0]
860 870 moves[oldnode] = newnode
861 871
862 872 allnewnodes = [n for ns in replacements.values() for n in ns]
863 873 toretract = {}
864 874 toadvance = {}
865 875 if fixphase:
866 876 precursors = {}
867 877 for oldnode, newnodes in replacements.items():
868 878 for newnode in newnodes:
869 879 precursors.setdefault(newnode, []).append(oldnode)
870 880
871 881 allnewnodes.sort(key=lambda n: unfi[n].rev())
872 882 newphases = {}
873 883 def phase(ctx):
874 884 return newphases.get(ctx.node(), ctx.phase())
875 885 for newnode in allnewnodes:
876 886 ctx = unfi[newnode]
877 887 parentphase = max(phase(p) for p in ctx.parents())
878 888 if targetphase is None:
879 889 oldphase = max(unfi[oldnode].phase()
880 890 for oldnode in precursors[newnode])
881 891 newphase = max(oldphase, parentphase)
882 892 else:
883 893 newphase = max(targetphase, parentphase)
884 894 newphases[newnode] = newphase
885 895 if newphase > ctx.phase():
886 896 toretract.setdefault(newphase, []).append(newnode)
887 897 elif newphase < ctx.phase():
888 898 toadvance.setdefault(newphase, []).append(newnode)
889 899
890 900 with repo.transaction('cleanup') as tr:
891 901 # Move bookmarks
892 902 bmarks = repo._bookmarks
893 903 bmarkchanges = []
894 904 for oldnode, newnode in moves.items():
895 905 oldbmarks = repo.nodebookmarks(oldnode)
896 906 if not oldbmarks:
897 907 continue
898 908 from . import bookmarks # avoid import cycle
899 909 repo.ui.debug('moving bookmarks %r from %s to %s\n' %
900 910 (pycompat.rapply(pycompat.maybebytestr, oldbmarks),
901 911 hex(oldnode), hex(newnode)))
902 912 # Delete divergent bookmarks being parents of related newnodes
903 913 deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
904 914 allnewnodes, newnode, oldnode)
905 915 deletenodes = _containsnode(repo, deleterevs)
906 916 for name in oldbmarks:
907 917 bmarkchanges.append((name, newnode))
908 918 for b in bookmarks.divergent2delete(repo, deletenodes, name):
909 919 bmarkchanges.append((b, None))
910 920
911 921 if bmarkchanges:
912 922 bmarks.applychanges(repo, tr, bmarkchanges)
913 923
914 924 for phase, nodes in toretract.items():
915 925 phases.retractboundary(repo, tr, phase, nodes)
916 926 for phase, nodes in toadvance.items():
917 927 phases.advanceboundary(repo, tr, phase, nodes)
918 928
919 929 # Obsolete or strip nodes
920 930 if obsolete.isenabled(repo, obsolete.createmarkersopt):
921 931 # If a node is already obsoleted, and we want to obsolete it
922 932 # without a successor, skip that obssolete request since it's
923 933 # unnecessary. That's the "if s or not isobs(n)" check below.
924 934 # Also sort the node in topology order, that might be useful for
925 935 # some obsstore logic.
926 936 # NOTE: the filtering and sorting might belong to createmarkers.
927 937 isobs = unfi.obsstore.successors.__contains__
928 938 torev = unfi.changelog.rev
929 939 sortfunc = lambda ns: torev(ns[0])
930 940 rels = [(unfi[n], tuple(unfi[m] for m in s))
931 941 for n, s in sorted(replacements.items(), key=sortfunc)
932 942 if s or not isobs(n)]
933 943 if rels:
934 944 obsolete.createmarkers(repo, rels, operation=operation,
935 945 metadata=metadata)
936 946 else:
937 947 from . import repair # avoid import cycle
938 948 tostrip = list(replacements)
939 949 if tostrip:
940 950 repair.delayedstrip(repo.ui, repo, tostrip, operation,
941 951 backup=backup)
942 952
def addremove(repo, matcher, prefix, opts=None):
    """Schedule unknown files for addition and missing files for removal,
    recording likely renames between them based on content similarity.

    Recurses into subrepos when requested via opts. Returns 1 if any
    explicitly requested file was rejected or a subrepo addremove failed,
    otherwise 0.
    """
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get('dry_run')
    try:
        # opts value may be a string from the command line; normalize to float
        similarity = float(opts.get('similarity') or 0)
    except ValueError:
        raise error.Abort(_('similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.Abort(_('similarity must be between 0 and 100'))
    similarity /= 100.0

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        # only descend into a subrepo if asked to, or if it was named
        # explicitly or matched by the pattern
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            try:
                if sub.addremove(submatch, prefix, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        # only report the error if the user named the file explicitly,
        # but always remember the rejection for the return value below
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                    badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret
1002 1012
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.

    Returns 1 if any of the given files was rejected by the matcher,
    otherwise 0.
    '''
    # NOTE: the lambda closes over 'rejected', which is assigned on the next
    # line; Python resolves the name at call time, so this is safe
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0
1031 1041
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns a 5-tuple of lists: (added, unknown, deleted, removed,
    forgotten), each holding repo-relative paths.
    '''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
                                unknown=True, ignored=False, full=False)
    for abs, st in walkresults.iteritems():
        # dirstate codes: '?' untracked, 'r' marked removed, 'a' marked added;
        # 'st' is the stat result, falsy when the file is gone from disk
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            # marked removed but present on disk again
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
1060 1070
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.

    Returns a dict mapping new path -> old path for every pair whose
    similarity score meets the threshold.  Each detected rename is
    reported on the ui unless both paths were matched exactly and the
    ui is not verbose.
    '''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(repo, added, removed,
                                                   similarity):
            if (repo.ui.verbose or not matcher.exact(old)
                or not matcher.exact(new)):
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (matcher.rel(old), matcher.rel(new),
                                score * 100))
            renames[new] = old
    return renames
1075 1085
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.

    'renames' maps new path -> old path; all changes are applied to the
    working context under the repo's wlock.
    '''
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in renames.iteritems():
            wctx.copy(old, new)
1085 1095
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    # follow an existing copy record so chains collapse to the original source
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        # dirstate codes: 'm' merged, 'n' normal — anything else needs a
        # lookup refresh instead of a copy record
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # source was only added in the working copy: no copy data can be
            # recorded, warn and just add the destination
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
1104 1114
def readrequires(opener, supported):
    '''Read and parse .hg/requires, verifying that every entry found is in
    the list of supported features.

    Returns the set of requirement strings.  Raises RequirementError when
    the file is corrupt or names features unknown to this Mercurial.
    '''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r in supported:
            continue
        # a valid requirement token is non-empty and starts alphanumerically
        if not r or not r[0:1].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        missings.append(r)
    if missings:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(sorted(missings)),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements
1123 1133
def writerequires(opener, requirements):
    """Write the given requirement names, sorted one per line, to the
    'requires' file via the given opener."""
    with opener('requires', 'w') as fp:
        fp.writelines("%s\n" % name for name in sorted(requirements))
1128 1138
class filecachesubentry(object):
    """Stat-based change tracker for a single file path.

    Remembers the stat() result of ``path`` so that changed() can later
    tell whether the file was modified, created or removed on disk.
    """

    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        # tri-state: True/False once determined, None while still unknown
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        # re-stat so subsequent changed() calls compare against "now"
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        # returns None when the file does not exist; re-raises any other error
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
1183 1193
class filecacheentry(object):
    """A group of filecachesubentry objects, one per tracked path.

    changed() reports whether any of the underlying files changed; refresh()
    re-stats all of them.
    """

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(p, stat) for p in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(subentry.changed() for subentry in self._entries)

    def refresh(self):
        for subentry in self._entries:
            subentry.refresh()
1200 1210
class filecache(object):
    """A property like decorator that tracks files under .hg/ for updates.

    On first access, the files defined as arguments are stat()ed and the
    results cached. The decorated function is called. The results are stashed
    away in a ``_filecache`` dict on the object whose method is decorated.

    On subsequent access, the cached result is returned.

    On external property set operations, stat() calls are performed and the new
    value is cached.

    On property delete operations, cached data is removed.

    When using the property API, cached data is always returned, if available:
    no stat() is performed to check if the file has changed and if the function
    needs to be called to reflect file changes.

    Others can muck about with the state of the ``_filecache`` dict. e.g. they
    can populate an entry before the property's getter is called. In this case,
    entries in ``_filecache`` will be used during property operations,
    if available. If the underlying file changes, it is up to external callers
    to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
    method result as well as possibly calling ``del obj._filecache[attr]`` to
    remove the ``filecacheentry``.
    """

    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        # record both the native-str attribute name (for __dict__) and the
        # bytes name (used as the _filecache key)
        self.func = func
        self.sname = func.__name__
        self.name = pycompat.sysbytes(self.sname)
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # do we need to check if the file changed?
        if self.sname in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.sname]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.sname] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.sname] = value # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.sname]
        except KeyError:
            raise AttributeError(self.sname)
1291 1301
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config("extdata", source)
    if not spec:
        raise error.Abort(_("unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith("shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                                    close_fds=procutil.closefds,
                                    stdout=subprocess.PIPE, cwd=repo.root)
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            # a record is "<revspec>" or "<revspec> <freeform value>"
            if " " in l:
                k, v = l.strip().split(" ", 1)
            else:
                k, v = l.strip(), ""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass # we ignore data for nodes that don't exist locally
    finally:
        # always reap the subprocess / close the stream, even on parse errors
        if proc:
            proc.communicate()
        if src:
            src.close()
    # 'cmd' is only unbound when 'proc' is None, so this access is safe
    if proc and proc.returncode != 0:
        raise error.Abort(_("extdata command '%s' failed: %s")
                          % (cmd, procutil.explainexit(proc.returncode)))

    return data
1346 1356
def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    """Run 'cmd' as a subprocess that may inherit 'lock'.

    The lock must currently be held; its locker token is exported to the
    child process through the environment variable 'envvar'.  Returns
    whatever repo.ui.system returns (the subprocess exit code).
    """
    if lock is None:
        raise error.LockInheritanceContractViolation(
            'lock can only be inherited while held')
    if environ is None:
        environ = {}
    with lock.inherit() as locker:
        environ[envvar] = locker
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1356 1366
def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    # the wlock token is passed down via the HG_WLOCK_LOCKER env var
    return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
                    **kwargs)
1365 1375
class progress(object):
    """Context-manager wrapper around ui.progress() for a single topic.

    Tracks the current position; the progress bar is cleared automatically
    when the context exits (via complete()).
    """

    def __init__(self, ui, topic, unit="", total=None):
        self.ui = ui
        self.pos = 0
        self.topic = topic
        self.unit = unit
        self.total = total

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.complete()

    def update(self, pos, item="", total=None):
        # None is reserved for complete(); callers must pass a number
        assert pos is not None
        if total:
            self.total = total
        self.pos = pos
        self._print(item)

    def increment(self, step=1, item="", total=None):
        self.update(self.pos + step, item, total)

    def complete(self):
        # a position of None tells the ui to clear the progress bar
        self.ui.progress(self.topic, None)

    def _print(self, item):
        self.ui.progress(self.topic, self.pos, item, self.unit,
                         self.total)
1396 1406
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    return any(ui.configbool('format', setting)
               for setting in ('generaldelta',
                               'usegeneraldelta',
                               'sparse-revlog'))
1404 1414
def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised

    Returns the value of the ``format.generaldelta`` config knob.
    """
    # experimental config: format.generaldelta
    return ui.configbool('format', 'generaldelta')
1410 1420
class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""
    firstlinekey = '__firstline'

    def __init__(self, vfs, path, keys=None):
        # NOTE(review): 'keys' is accepted but unused within this class
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of file should
        be treated as a key-value pair or returned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                e = _("empty simplekeyvalue file")
                raise error.CorruptedState(e)
            # we don't want to include '\n' in the __firstline
            d[self.firstlinekey] = lines[0][:-1]
            del lines[0]

        try:
            # the 'if line.strip()' part prevents us from failing on empty
            # lines which only contain '\n' therefore are not skipped
            # by 'if line'
            updatedict = dict(line[:-1].split('=', 1) for line in lines
                              if line.strip())
            if self.firstlinekey in updatedict:
                e = _("%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            d.update(updatedict)
        except ValueError as e:
            # a line without '=' makes the dict() constructor raise ValueError
            raise error.CorruptedState(str(e))
        return d

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file
        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to file before
        everything else, as it is, not in a key=value form"""
        lines = []
        if firstline is not None:
            lines.append('%s\n' % firstline)

        for k, v in data.items():
            if k == self.firstlinekey:
                e = "key name '%s' is reserved" % self.firstlinekey
                raise error.ProgrammingError(e)
            if not k[0:1].isalpha():
                e = "keys must start with a letter in a key-value file"
                raise error.ProgrammingError(e)
            if not k.isalnum():
                e = "invalid key name in a simple key-value file"
                raise error.ProgrammingError(e)
            if '\n' in v:
                e = "invalid value in a simple key-value file"
                raise error.ProgrammingError(e)
            lines.append("%s=%s\n" % (k, v))
        with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
            fp.write(''.join(lines))
1479 1489
# transaction name prefixes for which obsoleted changesets are summarized
# when the transaction closes (see registersummarycallback below)
_reportobsoletedsource = [
    'debugobsolete',
    'pull',
    'push',
    'serve',
    'unbundle',
]

# transaction name prefixes for which newly added changesets are reported
_reportnewcssource = [
    'pull',
    'unbundle',
]
1492 1502
def prefetchfiles(repo, revs, match):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally, before the command
    uses them.

    'match' may be None, in which case all files are prefetched.
    """
    if match:
        # The command itself will complain about files that don't exist, so
        # don't duplicate the message.
        match = matchmod.badmatch(match, lambda fn, msg: None)
    else:
        match = matchall(repo)

    fileprefetchhooks(repo, revs, match)
1505 1515
# a list of (repo, revs, match) prefetch functions, invoked by prefetchfiles()
fileprefetchhooks = util.hooks()

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True
1511 1521
def registersummarycallback(repo, otr, txnname=''):
    """register a callback to issue a summary after the transaction is closed

    Depending on 'txnname', this registers post-close callbacks reporting
    obsoleted changesets, newly introduced instabilities, newly added
    changesets, and phase changes of pre-existing changesets.
    """
    def txmatch(sources):
        # does the transaction name match any of the given prefixes?
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used leading to troubles. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())
        def wrapped(tr):
            repo = reporef()
            if filtername:
                repo = repo.filtered(filtername)
            func(repo, tr)
        # numbered category names keep the callbacks in registration order
        newcat = '%02i-txnreport' % len(categories)
        otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    if txmatch(_reportobsoletedsource):
        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            if obsoleted:
                repo.ui.status(_('obsoleted %i changesets\n')
                               % len(obsoleted))

    if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
        repo.ui.configbool('experimental', 'evolution.report-instabilities')):
        instabilitytypes = [
            ('orphan', 'orphan'),
            ('phase-divergent', 'phasedivergent'),
            ('content-divergent', 'contentdivergent'),
        ]

        def getinstabilitycounts(repo):
            # count instable revisions, ignoring filtered (hidden) ones
            filtered = repo.changelog.filteredrevs
            counts = {}
            for instability, revset in instabilitytypes:
                counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
                                          filtered)
            return counts

        oldinstabilitycounts = getinstabilitycounts(repo)
        @reportsummary
        def reportnewinstabilities(repo, tr):
            newinstabilitycounts = getinstabilitycounts(repo)
            for instability, revset in instabilitytypes:
                delta = (newinstabilitycounts[instability] -
                         oldinstabilitycounts[instability])
                msg = getinstabilitymessage(delta, instability)
                if msg:
                    repo.ui.warn(msg)

    if txmatch(_reportnewcssource):
        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            newrevs = tr.changes.get('revs', pycompat.xrange(0, 0))
            if not newrevs:
                return

            # Compute the bounds of new revisions' range, excluding obsoletes.
            unfi = repo.unfiltered()
            revs = unfi.revs('%ld and not obsolete()', newrevs)
            if not revs:
                # Got only obsoletes.
                return
            minrev, maxrev = repo[revs.min()], repo[revs.max()]

            if minrev == maxrev:
                revrange = minrev
            else:
                revrange = '%s:%s' % (minrev, maxrev)
            repo.ui.status(_('new changesets %s\n') % revrange)

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            pull/unbundle.
            """
            newrevs = tr.changes.get('revs', pycompat.xrange(0, 0))
            phasetracking = tr.changes.get('phases', {})
            if not phasetracking:
                return
            published = [
                rev for rev, (old, new) in phasetracking.iteritems()
                if new == phases.public and rev not in newrevs
            ]
            if not published:
                return
            repo.ui.status(_('%d local changesets published\n')
                           % len(published))
1613 1623
def getinstabilitymessage(delta, instability):
    """Return the warning message about newly introduced instabilities.

    Returns None when 'delta' is not positive.

    This exists as a separate function so that extensions can wrap it to
    show more information, like how to fix instabilities.
    """
    if delta <= 0:
        return None
    return _('%i new %s changesets\n') % (delta, instability)
1621 1631
def nodesummaries(repo, nodes, maxnumnodes=4):
    """Return a one-line summary of short hashes for 'nodes'.

    Every node is listed when there are at most 'maxnumnodes' of them or
    when the ui is verbose; otherwise only the first 'maxnumnodes' are
    shown, followed by a count of the remaining ones.
    """
    if repo.ui.verbose or len(nodes) <= maxnumnodes:
        return ' '.join(short(h) for h in nodes)
    shown = [short(h) for h in nodes[:maxnumnodes]]
    return _("%s and %d others") % (' '.join(shown), len(nodes) - maxnumnodes)
1627 1637
def enforcesinglehead(repo, tr, desc):
    """check that no named branch has multiple heads

    Aborts the transaction with a descriptive hint when a visible branch
    has more than one head.
    """
    if desc in ('strip', 'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered('visible')
    # possible improvement: we could restrict the check to affected branch
    for name, heads in visible.branchmap().iteritems():
        if len(heads) > 1:
            msg = _('rejecting multiple heads on branch "%s"')
            msg %= name
            hint = _('%d heads: %s')
            hint %= (len(heads), nodesummaries(repo, heads))
            raise error.Abort(msg, hint=hint)
1642 1652
def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.
    """
    # the default implementation is the identity; extensions override this
    return sink
1648 1658
def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    # direct access must be enabled and a filter must be active for there to
    # be anything to unhide
    if not repo.filtername or not repo.ui.configbool('experimental',
                                                     'directaccess'):
        return repo

    if repo.filtername not in ('visible', 'visible-hidden'):
        return repo

    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError: # will be reported by scmutil.revrange()
            continue

        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)

    if not revs:
        return repo

    if hiddentype == 'warn':
        unfi = repo.unfiltered()
        revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
        repo.ui.warn(_("warning: accessing hidden changesets for write "
                       "operation: %s\n") % revstr)

    # we have to use new filtername to separate branch/tags cache until we can
    # disable these caches when revisions are dynamically pinned.
    return repo.filtered('visible-hidden', revs)
1691 1701
def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and returns a set of revision numbers of hidden
    changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    # NOTE(review): valid revnums are 0..len(unficl)-1, yet the comparison
    # below is 'n <= tiprev' — confirm whether n == tiprev is intentional
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
    for s in symbols:
        try:
            # plain integers may be revision numbers (only honored when the
            # directaccess.revnums knob is enabled)
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    # hidden if present in the unfiltered but not the
                    # filtered changelog
                    if n not in cl:
                        revs.add(n)
                    continue
        except ValueError:
            pass

        # otherwise try the symbol as a (possibly abbreviated) hex node id
        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs
1725 1735
def bookmarkrevs(repo, mark):
    """
    Select revisions reachable by a given bookmark

    Excludes ancestors that are also reachable from other heads or other
    bookmarks.
    """
    return repo.revs("ancestors(bookmark(%s)) - "
                     "ancestors(head() and not bookmark(%s)) - "
                     "ancestors(bookmark() and not bookmark(%s))",
                     mark, mark, mark)
@@ -1,717 +1,718 b''
1 1 # templatefuncs.py - common template functions
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import re
11 11
12 12 from .i18n import _
13 13 from .node import (
14 14 bin,
15 15 wdirid,
16 16 )
17 17 from . import (
18 18 color,
19 19 encoding,
20 20 error,
21 21 minirst,
22 22 obsutil,
23 23 registrar,
24 24 revset as revsetmod,
25 25 revsetlang,
26 26 scmutil,
27 27 templatefilters,
28 28 templatekw,
29 29 templateutil,
30 30 util,
31 31 )
32 32 from .utils import (
33 33 dateutil,
34 34 stringutil,
35 35 )
36 36
37 37 evalrawexp = templateutil.evalrawexp
38 38 evalwrapped = templateutil.evalwrapped
39 39 evalfuncarg = templateutil.evalfuncarg
40 40 evalboolean = templateutil.evalboolean
41 41 evaldate = templateutil.evaldate
42 42 evalinteger = templateutil.evalinteger
43 43 evalstring = templateutil.evalstring
44 44 evalstringliteral = templateutil.evalstringliteral
45 45
46 46 # dict of template built-in functions
47 47 funcs = {}
48 48 templatefunc = registrar.templatefunc(funcs)
49 49
50 50 @templatefunc('date(date[, fmt])')
51 51 def date(context, mapping, args):
52 52 """Format a date. See :hg:`help dates` for formatting
53 53 strings. The default is a Unix date format, including the timezone:
54 54 "Mon Sep 04 15:13:13 2006 0700"."""
55 55 if not (1 <= len(args) <= 2):
56 56 # i18n: "date" is a keyword
57 57 raise error.ParseError(_("date expects one or two arguments"))
58 58
59 59 date = evaldate(context, mapping, args[0],
60 60 # i18n: "date" is a keyword
61 61 _("date expects a date information"))
62 62 fmt = None
63 63 if len(args) == 2:
64 64 fmt = evalstring(context, mapping, args[1])
65 65 if fmt is None:
66 66 return dateutil.datestr(date)
67 67 else:
68 68 return dateutil.datestr(date, fmt)
69 69
70 70 @templatefunc('dict([[key=]value...])', argspec='*args **kwargs')
71 71 def dict_(context, mapping, args):
72 72 """Construct a dict from key-value pairs. A key may be omitted if
73 73 a value expression can provide an unambiguous name."""
74 74 data = util.sortdict()
75 75
76 76 for v in args['args']:
77 77 k = templateutil.findsymbolicname(v)
78 78 if not k:
79 79 raise error.ParseError(_('dict key cannot be inferred'))
80 80 if k in data or k in args['kwargs']:
81 81 raise error.ParseError(_("duplicated dict key '%s' inferred") % k)
82 82 data[k] = evalfuncarg(context, mapping, v)
83 83
84 84 data.update((k, evalfuncarg(context, mapping, v))
85 85 for k, v in args['kwargs'].iteritems())
86 86 return templateutil.hybriddict(data)
87 87
88 88 @templatefunc('diff([includepattern [, excludepattern]])', requires={'ctx'})
89 89 def diff(context, mapping, args):
90 90 """Show a diff, optionally
91 91 specifying files to include or exclude."""
92 92 if len(args) > 2:
93 93 # i18n: "diff" is a keyword
94 94 raise error.ParseError(_("diff expects zero, one, or two arguments"))
95 95
96 96 def getpatterns(i):
97 97 if i < len(args):
98 98 s = evalstring(context, mapping, args[i]).strip()
99 99 if s:
100 100 return [s]
101 101 return []
102 102
103 103 ctx = context.resource(mapping, 'ctx')
104 104 chunks = ctx.diff(match=ctx.match([], getpatterns(0), getpatterns(1)))
105 105
106 106 return ''.join(chunks)
107 107
108 108 @templatefunc('extdata(source)', argspec='source', requires={'ctx', 'cache'})
109 109 def extdata(context, mapping, args):
110 110 """Show a text read from the specified extdata source. (EXPERIMENTAL)"""
111 111 if 'source' not in args:
112 112 # i18n: "extdata" is a keyword
113 113 raise error.ParseError(_('extdata expects one argument'))
114 114
115 115 source = evalstring(context, mapping, args['source'])
116 116 if not source:
117 117 sym = templateutil.findsymbolicname(args['source'])
118 118 if sym:
119 119 raise error.ParseError(_('empty data source specified'),
120 120 hint=_("did you mean extdata('%s')?") % sym)
121 121 else:
122 122 raise error.ParseError(_('empty data source specified'))
123 123 cache = context.resource(mapping, 'cache').setdefault('extdata', {})
124 124 ctx = context.resource(mapping, 'ctx')
125 125 if source in cache:
126 126 data = cache[source]
127 127 else:
128 128 data = cache[source] = scmutil.extdatasource(ctx.repo(), source)
129 129 return data.get(ctx.rev(), '')
130 130
131 131 @templatefunc('files(pattern)', requires={'ctx'})
132 132 def files(context, mapping, args):
133 133 """All files of the current changeset matching the pattern. See
134 134 :hg:`help patterns`."""
135 135 if not len(args) == 1:
136 136 # i18n: "files" is a keyword
137 137 raise error.ParseError(_("files expects one argument"))
138 138
139 139 raw = evalstring(context, mapping, args[0])
140 140 ctx = context.resource(mapping, 'ctx')
141 141 m = ctx.match([raw])
142 142 files = list(ctx.matches(m))
143 143 return templateutil.compatlist(context, mapping, "file", files)
144 144
145 145 @templatefunc('fill(text[, width[, initialident[, hangindent]]])')
146 146 def fill(context, mapping, args):
147 147 """Fill many
148 148 paragraphs with optional indentation. See the "fill" filter."""
149 149 if not (1 <= len(args) <= 4):
150 150 # i18n: "fill" is a keyword
151 151 raise error.ParseError(_("fill expects one to four arguments"))
152 152
153 153 text = evalstring(context, mapping, args[0])
154 154 width = 76
155 155 initindent = ''
156 156 hangindent = ''
157 157 if 2 <= len(args) <= 4:
158 158 width = evalinteger(context, mapping, args[1],
159 159 # i18n: "fill" is a keyword
160 160 _("fill expects an integer width"))
161 161 try:
162 162 initindent = evalstring(context, mapping, args[2])
163 163 hangindent = evalstring(context, mapping, args[3])
164 164 except IndexError:
165 165 pass
166 166
167 167 return templatefilters.fill(text, width, initindent, hangindent)
168 168
169 169 @templatefunc('filter(iterable[, expr])')
170 170 def filter_(context, mapping, args):
171 171 """Remove empty elements from a list or a dict. If expr specified, it's
172 172 applied to each element to test emptiness."""
173 173 if not (1 <= len(args) <= 2):
174 174 # i18n: "filter" is a keyword
175 175 raise error.ParseError(_("filter expects one or two arguments"))
176 176 iterable = evalwrapped(context, mapping, args[0])
177 177 if len(args) == 1:
178 178 def select(w):
179 179 return w.tobool(context, mapping)
180 180 else:
181 181 def select(w):
182 182 if not isinstance(w, templateutil.mappable):
183 183 raise error.ParseError(_("not filterable by expression"))
184 184 lm = context.overlaymap(mapping, w.tomap(context))
185 185 return evalboolean(context, lm, args[1])
186 186 return iterable.filter(context, mapping, select)
187 187
188 188 @templatefunc('formatnode(node)', requires={'ui'})
189 189 def formatnode(context, mapping, args):
190 190 """Obtain the preferred form of a changeset hash. (DEPRECATED)"""
191 191 if len(args) != 1:
192 192 # i18n: "formatnode" is a keyword
193 193 raise error.ParseError(_("formatnode expects one argument"))
194 194
195 195 ui = context.resource(mapping, 'ui')
196 196 node = evalstring(context, mapping, args[0])
197 197 if ui.debugflag:
198 198 return node
199 199 return templatefilters.short(node)
200 200
201 201 @templatefunc('mailmap(author)', requires={'repo', 'cache'})
202 202 def mailmap(context, mapping, args):
203 203 """Return the author, updated according to the value
204 204 set in the .mailmap file"""
205 205 if len(args) != 1:
206 206 raise error.ParseError(_("mailmap expects one argument"))
207 207
208 208 author = evalstring(context, mapping, args[0])
209 209
210 210 cache = context.resource(mapping, 'cache')
211 211 repo = context.resource(mapping, 'repo')
212 212
213 213 if 'mailmap' not in cache:
214 214 data = repo.wvfs.tryread('.mailmap')
215 215 cache['mailmap'] = stringutil.parsemailmap(data)
216 216
217 217 return stringutil.mapname(cache['mailmap'], author)
218 218
219 219 @templatefunc('pad(text, width[, fillchar=\' \'[, left=False]])',
220 220 argspec='text width fillchar left')
221 221 def pad(context, mapping, args):
222 222 """Pad text with a
223 223 fill character."""
224 224 if 'text' not in args or 'width' not in args:
225 225 # i18n: "pad" is a keyword
226 226 raise error.ParseError(_("pad() expects two to four arguments"))
227 227
228 228 width = evalinteger(context, mapping, args['width'],
229 229 # i18n: "pad" is a keyword
230 230 _("pad() expects an integer width"))
231 231
232 232 text = evalstring(context, mapping, args['text'])
233 233
234 234 left = False
235 235 fillchar = ' '
236 236 if 'fillchar' in args:
237 237 fillchar = evalstring(context, mapping, args['fillchar'])
238 238 if len(color.stripeffects(fillchar)) != 1:
239 239 # i18n: "pad" is a keyword
240 240 raise error.ParseError(_("pad() expects a single fill character"))
241 241 if 'left' in args:
242 242 left = evalboolean(context, mapping, args['left'])
243 243
244 244 fillwidth = width - encoding.colwidth(color.stripeffects(text))
245 245 if fillwidth <= 0:
246 246 return text
247 247 if left:
248 248 return fillchar * fillwidth + text
249 249 else:
250 250 return text + fillchar * fillwidth
251 251
252 252 @templatefunc('indent(text, indentchars[, firstline])')
253 253 def indent(context, mapping, args):
254 254 """Indents all non-empty lines
255 255 with the characters given in the indentchars string. An optional
256 256 third parameter will override the indent for the first line only
257 257 if present."""
258 258 if not (2 <= len(args) <= 3):
259 259 # i18n: "indent" is a keyword
260 260 raise error.ParseError(_("indent() expects two or three arguments"))
261 261
262 262 text = evalstring(context, mapping, args[0])
263 263 indent = evalstring(context, mapping, args[1])
264 264
265 265 if len(args) == 3:
266 266 firstline = evalstring(context, mapping, args[2])
267 267 else:
268 268 firstline = indent
269 269
270 270 # the indent function doesn't indent the first line, so we do it here
271 271 return templatefilters.indent(firstline + text, indent)
272 272
273 273 @templatefunc('get(dict, key)')
274 274 def get(context, mapping, args):
275 275 """Get an attribute/key from an object. Some keywords
276 276 are complex types. This function allows you to obtain the value of an
277 277 attribute on these types."""
278 278 if len(args) != 2:
279 279 # i18n: "get" is a keyword
280 280 raise error.ParseError(_("get() expects two arguments"))
281 281
282 282 dictarg = evalwrapped(context, mapping, args[0])
283 283 key = evalrawexp(context, mapping, args[1])
284 284 try:
285 285 return dictarg.getmember(context, mapping, key)
286 286 except error.ParseError as err:
287 287 # i18n: "get" is a keyword
288 288 hint = _("get() expects a dict as first argument")
289 289 raise error.ParseError(bytes(err), hint=hint)
290 290
291 291 @templatefunc('if(expr, then[, else])')
292 292 def if_(context, mapping, args):
293 293 """Conditionally execute based on the result of
294 294 an expression."""
295 295 if not (2 <= len(args) <= 3):
296 296 # i18n: "if" is a keyword
297 297 raise error.ParseError(_("if expects two or three arguments"))
298 298
299 299 test = evalboolean(context, mapping, args[0])
300 300 if test:
301 301 return evalrawexp(context, mapping, args[1])
302 302 elif len(args) == 3:
303 303 return evalrawexp(context, mapping, args[2])
304 304
305 305 @templatefunc('ifcontains(needle, haystack, then[, else])')
306 306 def ifcontains(context, mapping, args):
307 307 """Conditionally execute based
308 308 on whether the item "needle" is in "haystack"."""
309 309 if not (3 <= len(args) <= 4):
310 310 # i18n: "ifcontains" is a keyword
311 311 raise error.ParseError(_("ifcontains expects three or four arguments"))
312 312
313 313 haystack = evalwrapped(context, mapping, args[1])
314 314 try:
315 315 needle = evalrawexp(context, mapping, args[0])
316 316 found = haystack.contains(context, mapping, needle)
317 317 except error.ParseError:
318 318 found = False
319 319
320 320 if found:
321 321 return evalrawexp(context, mapping, args[2])
322 322 elif len(args) == 4:
323 323 return evalrawexp(context, mapping, args[3])
324 324
325 325 @templatefunc('ifeq(expr1, expr2, then[, else])')
326 326 def ifeq(context, mapping, args):
327 327 """Conditionally execute based on
328 328 whether 2 items are equivalent."""
329 329 if not (3 <= len(args) <= 4):
330 330 # i18n: "ifeq" is a keyword
331 331 raise error.ParseError(_("ifeq expects three or four arguments"))
332 332
333 333 test = evalstring(context, mapping, args[0])
334 334 match = evalstring(context, mapping, args[1])
335 335 if test == match:
336 336 return evalrawexp(context, mapping, args[2])
337 337 elif len(args) == 4:
338 338 return evalrawexp(context, mapping, args[3])
339 339
340 340 @templatefunc('join(list, sep)')
341 341 def join(context, mapping, args):
342 342 """Join items in a list with a delimiter."""
343 343 if not (1 <= len(args) <= 2):
344 344 # i18n: "join" is a keyword
345 345 raise error.ParseError(_("join expects one or two arguments"))
346 346
347 347 joinset = evalwrapped(context, mapping, args[0])
348 348 joiner = " "
349 349 if len(args) > 1:
350 350 joiner = evalstring(context, mapping, args[1])
351 351 return joinset.join(context, mapping, joiner)
352 352
353 353 @templatefunc('label(label, expr)', requires={'ui'})
354 354 def label(context, mapping, args):
355 355 """Apply a label to generated content. Content with
356 356 a label applied can result in additional post-processing, such as
357 357 automatic colorization."""
358 358 if len(args) != 2:
359 359 # i18n: "label" is a keyword
360 360 raise error.ParseError(_("label expects two arguments"))
361 361
362 362 ui = context.resource(mapping, 'ui')
363 363 thing = evalstring(context, mapping, args[1])
364 364 # preserve unknown symbol as literal so effects like 'red', 'bold',
365 365 # etc. don't need to be quoted
366 366 label = evalstringliteral(context, mapping, args[0])
367 367
368 368 return ui.label(thing, label)
369 369
370 370 @templatefunc('latesttag([pattern])')
371 371 def latesttag(context, mapping, args):
372 372 """The global tags matching the given pattern on the
373 373 most recent globally tagged ancestor of this changeset.
374 374 If no such tags exist, the "{tag}" template resolves to
375 375 the string "null". See :hg:`help revisions.patterns` for the pattern
376 376 syntax.
377 377 """
378 378 if len(args) > 1:
379 379 # i18n: "latesttag" is a keyword
380 380 raise error.ParseError(_("latesttag expects at most one argument"))
381 381
382 382 pattern = None
383 383 if len(args) == 1:
384 384 pattern = evalstring(context, mapping, args[0])
385 385 return templatekw.showlatesttags(context, mapping, pattern)
386 386
387 387 @templatefunc('localdate(date[, tz])')
388 388 def localdate(context, mapping, args):
389 389 """Converts a date to the specified timezone.
390 390 The default is local date."""
391 391 if not (1 <= len(args) <= 2):
392 392 # i18n: "localdate" is a keyword
393 393 raise error.ParseError(_("localdate expects one or two arguments"))
394 394
395 395 date = evaldate(context, mapping, args[0],
396 396 # i18n: "localdate" is a keyword
397 397 _("localdate expects a date information"))
398 398 if len(args) >= 2:
399 399 tzoffset = None
400 400 tz = evalfuncarg(context, mapping, args[1])
401 401 if isinstance(tz, bytes):
402 402 tzoffset, remainder = dateutil.parsetimezone(tz)
403 403 if remainder:
404 404 tzoffset = None
405 405 if tzoffset is None:
406 406 try:
407 407 tzoffset = int(tz)
408 408 except (TypeError, ValueError):
409 409 # i18n: "localdate" is a keyword
410 410 raise error.ParseError(_("localdate expects a timezone"))
411 411 else:
412 412 tzoffset = dateutil.makedate()[1]
413 413 return templateutil.date((date[0], tzoffset))
414 414
415 415 @templatefunc('max(iterable)')
416 416 def max_(context, mapping, args, **kwargs):
417 417 """Return the max of an iterable"""
418 418 if len(args) != 1:
419 419 # i18n: "max" is a keyword
420 420 raise error.ParseError(_("max expects one argument"))
421 421
422 422 iterable = evalwrapped(context, mapping, args[0])
423 423 try:
424 424 return iterable.getmax(context, mapping)
425 425 except error.ParseError as err:
426 426 # i18n: "max" is a keyword
427 427 hint = _("max first argument should be an iterable")
428 428 raise error.ParseError(bytes(err), hint=hint)
429 429
430 430 @templatefunc('min(iterable)')
431 431 def min_(context, mapping, args, **kwargs):
432 432 """Return the min of an iterable"""
433 433 if len(args) != 1:
434 434 # i18n: "min" is a keyword
435 435 raise error.ParseError(_("min expects one argument"))
436 436
437 437 iterable = evalwrapped(context, mapping, args[0])
438 438 try:
439 439 return iterable.getmin(context, mapping)
440 440 except error.ParseError as err:
441 441 # i18n: "min" is a keyword
442 442 hint = _("min first argument should be an iterable")
443 443 raise error.ParseError(bytes(err), hint=hint)
444 444
445 445 @templatefunc('mod(a, b)')
446 446 def mod(context, mapping, args):
447 447 """Calculate a mod b such that a / b + a mod b == a"""
448 448 if not len(args) == 2:
449 449 # i18n: "mod" is a keyword
450 450 raise error.ParseError(_("mod expects two arguments"))
451 451
452 452 func = lambda a, b: a % b
453 453 return templateutil.runarithmetic(context, mapping,
454 454 (func, args[0], args[1]))
455 455
456 456 @templatefunc('obsfateoperations(markers)')
457 457 def obsfateoperations(context, mapping, args):
458 458 """Compute obsfate related information based on markers (EXPERIMENTAL)"""
459 459 if len(args) != 1:
460 460 # i18n: "obsfateoperations" is a keyword
461 461 raise error.ParseError(_("obsfateoperations expects one argument"))
462 462
463 463 markers = evalfuncarg(context, mapping, args[0])
464 464
465 465 try:
466 466 data = obsutil.markersoperations(markers)
467 467 return templateutil.hybridlist(data, name='operation')
468 468 except (TypeError, KeyError):
469 469 # i18n: "obsfateoperations" is a keyword
470 470 errmsg = _("obsfateoperations first argument should be an iterable")
471 471 raise error.ParseError(errmsg)
472 472
473 473 @templatefunc('obsfatedate(markers)')
474 474 def obsfatedate(context, mapping, args):
475 475 """Compute obsfate related information based on markers (EXPERIMENTAL)"""
476 476 if len(args) != 1:
477 477 # i18n: "obsfatedate" is a keyword
478 478 raise error.ParseError(_("obsfatedate expects one argument"))
479 479
480 480 markers = evalfuncarg(context, mapping, args[0])
481 481
482 482 try:
483 483 # TODO: maybe this has to be a wrapped list of date wrappers?
484 484 data = obsutil.markersdates(markers)
485 485 return templateutil.hybridlist(data, name='date', fmt='%d %d')
486 486 except (TypeError, KeyError):
487 487 # i18n: "obsfatedate" is a keyword
488 488 errmsg = _("obsfatedate first argument should be an iterable")
489 489 raise error.ParseError(errmsg)
490 490
491 491 @templatefunc('obsfateusers(markers)')
492 492 def obsfateusers(context, mapping, args):
493 493 """Compute obsfate related information based on markers (EXPERIMENTAL)"""
494 494 if len(args) != 1:
495 495 # i18n: "obsfateusers" is a keyword
496 496 raise error.ParseError(_("obsfateusers expects one argument"))
497 497
498 498 markers = evalfuncarg(context, mapping, args[0])
499 499
500 500 try:
501 501 data = obsutil.markersusers(markers)
502 502 return templateutil.hybridlist(data, name='user')
503 503 except (TypeError, KeyError, ValueError):
504 504 # i18n: "obsfateusers" is a keyword
505 505 msg = _("obsfateusers first argument should be an iterable of "
506 506 "obsmakers")
507 507 raise error.ParseError(msg)
508 508
509 509 @templatefunc('obsfateverb(successors, markers)')
510 510 def obsfateverb(context, mapping, args):
511 511 """Compute obsfate related information based on successors (EXPERIMENTAL)"""
512 512 if len(args) != 2:
513 513 # i18n: "obsfateverb" is a keyword
514 514 raise error.ParseError(_("obsfateverb expects two arguments"))
515 515
516 516 successors = evalfuncarg(context, mapping, args[0])
517 517 markers = evalfuncarg(context, mapping, args[1])
518 518
519 519 try:
520 520 return obsutil.obsfateverb(successors, markers)
521 521 except TypeError:
522 522 # i18n: "obsfateverb" is a keyword
523 523 errmsg = _("obsfateverb first argument should be countable")
524 524 raise error.ParseError(errmsg)
525 525
526 526 @templatefunc('relpath(path)', requires={'repo'})
527 527 def relpath(context, mapping, args):
528 528 """Convert a repository-absolute path into a filesystem path relative to
529 529 the current working directory."""
530 530 if len(args) != 1:
531 531 # i18n: "relpath" is a keyword
532 532 raise error.ParseError(_("relpath expects one argument"))
533 533
534 534 repo = context.resource(mapping, 'repo')
535 535 path = evalstring(context, mapping, args[0])
536 536 return repo.pathto(path)
537 537
538 538 @templatefunc('revset(query[, formatargs...])', requires={'repo', 'cache'})
539 539 def revset(context, mapping, args):
540 540 """Execute a revision set query. See
541 541 :hg:`help revset`."""
542 542 if not len(args) > 0:
543 543 # i18n: "revset" is a keyword
544 544 raise error.ParseError(_("revset expects one or more arguments"))
545 545
546 546 raw = evalstring(context, mapping, args[0])
547 547 repo = context.resource(mapping, 'repo')
548 548
549 549 def query(expr):
550 550 m = revsetmod.match(repo.ui, expr, lookup=revsetmod.lookupfn(repo))
551 551 return m(repo)
552 552
553 553 if len(args) > 1:
554 554 formatargs = [evalfuncarg(context, mapping, a) for a in args[1:]]
555 555 revs = query(revsetlang.formatspec(raw, *formatargs))
556 556 revs = list(revs)
557 557 else:
558 558 cache = context.resource(mapping, 'cache')
559 559 revsetcache = cache.setdefault("revsetcache", {})
560 560 if raw in revsetcache:
561 561 revs = revsetcache[raw]
562 562 else:
563 563 revs = query(raw)
564 564 revs = list(revs)
565 565 revsetcache[raw] = revs
566 566 return templatekw.showrevslist(context, mapping, "revision", revs)
567 567
568 568 @templatefunc('rstdoc(text, style)')
569 569 def rstdoc(context, mapping, args):
570 570 """Format reStructuredText."""
571 571 if len(args) != 2:
572 572 # i18n: "rstdoc" is a keyword
573 573 raise error.ParseError(_("rstdoc expects two arguments"))
574 574
575 575 text = evalstring(context, mapping, args[0])
576 576 style = evalstring(context, mapping, args[1])
577 577
578 578 return minirst.format(text, style=style, keep=['verbose'])[0]
579 579
580 580 @templatefunc('separate(sep, args...)', argspec='sep *args')
581 581 def separate(context, mapping, args):
582 582 """Add a separator between non-empty arguments."""
583 583 if 'sep' not in args:
584 584 # i18n: "separate" is a keyword
585 585 raise error.ParseError(_("separate expects at least one argument"))
586 586
587 587 sep = evalstring(context, mapping, args['sep'])
588 588 first = True
589 589 for arg in args['args']:
590 590 argstr = evalstring(context, mapping, arg)
591 591 if not argstr:
592 592 continue
593 593 if first:
594 594 first = False
595 595 else:
596 596 yield sep
597 597 yield argstr
598 598
599 @templatefunc('shortest(node, minlength=4)', requires={'repo'})
599 @templatefunc('shortest(node, minlength=4)', requires={'repo', 'cache'})
600 600 def shortest(context, mapping, args):
601 601 """Obtain the shortest representation of
602 602 a node."""
603 603 if not (1 <= len(args) <= 2):
604 604 # i18n: "shortest" is a keyword
605 605 raise error.ParseError(_("shortest() expects one or two arguments"))
606 606
607 607 hexnode = evalstring(context, mapping, args[0])
608 608
609 609 minlength = 4
610 610 if len(args) > 1:
611 611 minlength = evalinteger(context, mapping, args[1],
612 612 # i18n: "shortest" is a keyword
613 613 _("shortest() expects an integer minlength"))
614 614
615 615 repo = context.resource(mapping, 'repo')
616 616 if len(hexnode) > 40:
617 617 return hexnode
618 618 elif len(hexnode) == 40:
619 619 try:
620 620 node = bin(hexnode)
621 621 except TypeError:
622 622 return hexnode
623 623 else:
624 624 try:
625 625 node = scmutil.resolvehexnodeidprefix(repo, hexnode)
626 626 except error.WdirUnsupported:
627 627 node = wdirid
628 628 except error.LookupError:
629 629 return hexnode
630 630 if not node:
631 631 return hexnode
632 cache = context.resource(mapping, 'cache')
632 633 try:
633 return scmutil.shortesthexnodeidprefix(repo, node, minlength)
634 return scmutil.shortesthexnodeidprefix(repo, node, minlength, cache)
634 635 except error.RepoLookupError:
635 636 return hexnode
636 637
637 638 @templatefunc('strip(text[, chars])')
638 639 def strip(context, mapping, args):
639 640 """Strip characters from a string. By default,
640 641 strips all leading and trailing whitespace."""
641 642 if not (1 <= len(args) <= 2):
642 643 # i18n: "strip" is a keyword
643 644 raise error.ParseError(_("strip expects one or two arguments"))
644 645
645 646 text = evalstring(context, mapping, args[0])
646 647 if len(args) == 2:
647 648 chars = evalstring(context, mapping, args[1])
648 649 return text.strip(chars)
649 650 return text.strip()
650 651
651 652 @templatefunc('sub(pattern, replacement, expression)')
652 653 def sub(context, mapping, args):
653 654 """Perform text substitution
654 655 using regular expressions."""
655 656 if len(args) != 3:
656 657 # i18n: "sub" is a keyword
657 658 raise error.ParseError(_("sub expects three arguments"))
658 659
659 660 pat = evalstring(context, mapping, args[0])
660 661 rpl = evalstring(context, mapping, args[1])
661 662 src = evalstring(context, mapping, args[2])
662 663 try:
663 664 patre = re.compile(pat)
664 665 except re.error:
665 666 # i18n: "sub" is a keyword
666 667 raise error.ParseError(_("sub got an invalid pattern: %s") % pat)
667 668 try:
668 669 yield patre.sub(rpl, src)
669 670 except re.error:
670 671 # i18n: "sub" is a keyword
671 672 raise error.ParseError(_("sub got an invalid replacement: %s") % rpl)
672 673
673 674 @templatefunc('startswith(pattern, text)')
674 675 def startswith(context, mapping, args):
675 676 """Returns the value from the "text" argument
676 677 if it begins with the content from the "pattern" argument."""
677 678 if len(args) != 2:
678 679 # i18n: "startswith" is a keyword
679 680 raise error.ParseError(_("startswith expects two arguments"))
680 681
681 682 patn = evalstring(context, mapping, args[0])
682 683 text = evalstring(context, mapping, args[1])
683 684 if text.startswith(patn):
684 685 return text
685 686 return ''
686 687
687 688 @templatefunc('word(number, text[, separator])')
688 689 def word(context, mapping, args):
689 690 """Return the nth word from a string."""
690 691 if not (2 <= len(args) <= 3):
691 692 # i18n: "word" is a keyword
692 693 raise error.ParseError(_("word expects two or three arguments, got %d")
693 694 % len(args))
694 695
695 696 num = evalinteger(context, mapping, args[0],
696 697 # i18n: "word" is a keyword
697 698 _("word expects an integer index"))
698 699 text = evalstring(context, mapping, args[1])
699 700 if len(args) == 3:
700 701 splitter = evalstring(context, mapping, args[2])
701 702 else:
702 703 splitter = None
703 704
704 705 tokens = text.split(splitter)
705 706 if num >= len(tokens) or num < -len(tokens):
706 707 return ''
707 708 else:
708 709 return tokens[num]
709 710
710 711 def loadfunction(ui, extname, registrarobj):
711 712 """Load template function from specified registrarobj
712 713 """
713 714 for name, func in registrarobj._table.iteritems():
714 715 funcs[name] = func
715 716
716 717 # tell hggettext to extract docstrings from these functions:
717 718 i18nfunctions = funcs.values()
General Comments 0
You need to be logged in to leave comments. Login now