##// END OF EJS Templates
shortest: cache disambiguation revset...
Martin von Zweigbergk -
r38889:3588e41f default
parent child Browse files
Show More
@@ -1,1733 +1,1743 b''
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import glob
11 import glob
12 import hashlib
12 import hashlib
13 import os
13 import os
14 import re
14 import re
15 import socket
15 import socket
16 import subprocess
16 import subprocess
17 import weakref
17 import weakref
18
18
19 from .i18n import _
19 from .i18n import _
20 from .node import (
20 from .node import (
21 bin,
21 bin,
22 hex,
22 hex,
23 nullid,
23 nullid,
24 short,
24 short,
25 wdirid,
25 wdirid,
26 wdirrev,
26 wdirrev,
27 )
27 )
28
28
29 from . import (
29 from . import (
30 encoding,
30 encoding,
31 error,
31 error,
32 match as matchmod,
32 match as matchmod,
33 obsolete,
33 obsolete,
34 obsutil,
34 obsutil,
35 pathutil,
35 pathutil,
36 phases,
36 phases,
37 pycompat,
37 pycompat,
38 revsetlang,
38 revsetlang,
39 similar,
39 similar,
40 url,
40 url,
41 util,
41 util,
42 vfs,
42 vfs,
43 )
43 )
44
44
45 from .utils import (
45 from .utils import (
46 procutil,
46 procutil,
47 stringutil,
47 stringutil,
48 )
48 )
49
49
50 if pycompat.iswindows:
50 if pycompat.iswindows:
51 from . import scmwindows as scmplatform
51 from . import scmwindows as scmplatform
52 else:
52 else:
53 from . import scmposix as scmplatform
53 from . import scmposix as scmplatform
54
54
55 termsize = scmplatform.termsize
55 termsize = scmplatform.termsize
56
56
57 class status(tuple):
57 class status(tuple):
58 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
58 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
59 and 'ignored' properties are only relevant to the working copy.
59 and 'ignored' properties are only relevant to the working copy.
60 '''
60 '''
61
61
62 __slots__ = ()
62 __slots__ = ()
63
63
64 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
64 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
65 clean):
65 clean):
66 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
66 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
67 ignored, clean))
67 ignored, clean))
68
68
69 @property
69 @property
70 def modified(self):
70 def modified(self):
71 '''files that have been modified'''
71 '''files that have been modified'''
72 return self[0]
72 return self[0]
73
73
74 @property
74 @property
75 def added(self):
75 def added(self):
76 '''files that have been added'''
76 '''files that have been added'''
77 return self[1]
77 return self[1]
78
78
79 @property
79 @property
80 def removed(self):
80 def removed(self):
81 '''files that have been removed'''
81 '''files that have been removed'''
82 return self[2]
82 return self[2]
83
83
84 @property
84 @property
85 def deleted(self):
85 def deleted(self):
86 '''files that are in the dirstate, but have been deleted from the
86 '''files that are in the dirstate, but have been deleted from the
87 working copy (aka "missing")
87 working copy (aka "missing")
88 '''
88 '''
89 return self[3]
89 return self[3]
90
90
91 @property
91 @property
92 def unknown(self):
92 def unknown(self):
93 '''files not in the dirstate that are not ignored'''
93 '''files not in the dirstate that are not ignored'''
94 return self[4]
94 return self[4]
95
95
96 @property
96 @property
97 def ignored(self):
97 def ignored(self):
98 '''files not in the dirstate that are ignored (by _dirignore())'''
98 '''files not in the dirstate that are ignored (by _dirignore())'''
99 return self[5]
99 return self[5]
100
100
101 @property
101 @property
102 def clean(self):
102 def clean(self):
103 '''files that have not been modified'''
103 '''files that have not been modified'''
104 return self[6]
104 return self[6]
105
105
106 def __repr__(self, *args, **kwargs):
106 def __repr__(self, *args, **kwargs):
107 return ((r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
107 return ((r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
108 r'unknown=%s, ignored=%s, clean=%s>') %
108 r'unknown=%s, ignored=%s, clean=%s>') %
109 tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self))
109 tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self))
110
110
111 def itersubrepos(ctx1, ctx2):
111 def itersubrepos(ctx1, ctx2):
112 """find subrepos in ctx1 or ctx2"""
112 """find subrepos in ctx1 or ctx2"""
113 # Create a (subpath, ctx) mapping where we prefer subpaths from
113 # Create a (subpath, ctx) mapping where we prefer subpaths from
114 # ctx1. The subpaths from ctx2 are important when the .hgsub file
114 # ctx1. The subpaths from ctx2 are important when the .hgsub file
115 # has been modified (in ctx2) but not yet committed (in ctx1).
115 # has been modified (in ctx2) but not yet committed (in ctx1).
116 subpaths = dict.fromkeys(ctx2.substate, ctx2)
116 subpaths = dict.fromkeys(ctx2.substate, ctx2)
117 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
117 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
118
118
119 missing = set()
119 missing = set()
120
120
121 for subpath in ctx2.substate:
121 for subpath in ctx2.substate:
122 if subpath not in ctx1.substate:
122 if subpath not in ctx1.substate:
123 del subpaths[subpath]
123 del subpaths[subpath]
124 missing.add(subpath)
124 missing.add(subpath)
125
125
126 for subpath, ctx in sorted(subpaths.iteritems()):
126 for subpath, ctx in sorted(subpaths.iteritems()):
127 yield subpath, ctx.sub(subpath)
127 yield subpath, ctx.sub(subpath)
128
128
129 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
129 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
130 # status and diff will have an accurate result when it does
130 # status and diff will have an accurate result when it does
131 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
131 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
132 # against itself.
132 # against itself.
133 for subpath in missing:
133 for subpath in missing:
134 yield subpath, ctx2.nullsub(subpath, ctx1)
134 yield subpath, ctx2.nullsub(subpath, ctx1)
135
135
136 def nochangesfound(ui, repo, excluded=None):
136 def nochangesfound(ui, repo, excluded=None):
137 '''Report no changes for push/pull, excluded is None or a list of
137 '''Report no changes for push/pull, excluded is None or a list of
138 nodes excluded from the push/pull.
138 nodes excluded from the push/pull.
139 '''
139 '''
140 secretlist = []
140 secretlist = []
141 if excluded:
141 if excluded:
142 for n in excluded:
142 for n in excluded:
143 ctx = repo[n]
143 ctx = repo[n]
144 if ctx.phase() >= phases.secret and not ctx.extinct():
144 if ctx.phase() >= phases.secret and not ctx.extinct():
145 secretlist.append(n)
145 secretlist.append(n)
146
146
147 if secretlist:
147 if secretlist:
148 ui.status(_("no changes found (ignored %d secret changesets)\n")
148 ui.status(_("no changes found (ignored %d secret changesets)\n")
149 % len(secretlist))
149 % len(secretlist))
150 else:
150 else:
151 ui.status(_("no changes found\n"))
151 ui.status(_("no changes found\n"))
152
152
153 def callcatch(ui, func):
153 def callcatch(ui, func):
154 """call func() with global exception handling
154 """call func() with global exception handling
155
155
156 return func() if no exception happens. otherwise do some error handling
156 return func() if no exception happens. otherwise do some error handling
157 and return an exit code accordingly. does not handle all exceptions.
157 and return an exit code accordingly. does not handle all exceptions.
158 """
158 """
159 try:
159 try:
160 try:
160 try:
161 return func()
161 return func()
162 except: # re-raises
162 except: # re-raises
163 ui.traceback()
163 ui.traceback()
164 raise
164 raise
165 # Global exception handling, alphabetically
165 # Global exception handling, alphabetically
166 # Mercurial-specific first, followed by built-in and library exceptions
166 # Mercurial-specific first, followed by built-in and library exceptions
167 except error.LockHeld as inst:
167 except error.LockHeld as inst:
168 if inst.errno == errno.ETIMEDOUT:
168 if inst.errno == errno.ETIMEDOUT:
169 reason = _('timed out waiting for lock held by %r') % inst.locker
169 reason = _('timed out waiting for lock held by %r') % inst.locker
170 else:
170 else:
171 reason = _('lock held by %r') % inst.locker
171 reason = _('lock held by %r') % inst.locker
172 ui.error(_("abort: %s: %s\n") % (
172 ui.error(_("abort: %s: %s\n") % (
173 inst.desc or stringutil.forcebytestr(inst.filename), reason))
173 inst.desc or stringutil.forcebytestr(inst.filename), reason))
174 if not inst.locker:
174 if not inst.locker:
175 ui.error(_("(lock might be very busy)\n"))
175 ui.error(_("(lock might be very busy)\n"))
176 except error.LockUnavailable as inst:
176 except error.LockUnavailable as inst:
177 ui.error(_("abort: could not lock %s: %s\n") %
177 ui.error(_("abort: could not lock %s: %s\n") %
178 (inst.desc or stringutil.forcebytestr(inst.filename),
178 (inst.desc or stringutil.forcebytestr(inst.filename),
179 encoding.strtolocal(inst.strerror)))
179 encoding.strtolocal(inst.strerror)))
180 except error.OutOfBandError as inst:
180 except error.OutOfBandError as inst:
181 if inst.args:
181 if inst.args:
182 msg = _("abort: remote error:\n")
182 msg = _("abort: remote error:\n")
183 else:
183 else:
184 msg = _("abort: remote error\n")
184 msg = _("abort: remote error\n")
185 ui.error(msg)
185 ui.error(msg)
186 if inst.args:
186 if inst.args:
187 ui.error(''.join(inst.args))
187 ui.error(''.join(inst.args))
188 if inst.hint:
188 if inst.hint:
189 ui.error('(%s)\n' % inst.hint)
189 ui.error('(%s)\n' % inst.hint)
190 except error.RepoError as inst:
190 except error.RepoError as inst:
191 ui.error(_("abort: %s!\n") % inst)
191 ui.error(_("abort: %s!\n") % inst)
192 if inst.hint:
192 if inst.hint:
193 ui.error(_("(%s)\n") % inst.hint)
193 ui.error(_("(%s)\n") % inst.hint)
194 except error.ResponseError as inst:
194 except error.ResponseError as inst:
195 ui.error(_("abort: %s") % inst.args[0])
195 ui.error(_("abort: %s") % inst.args[0])
196 msg = inst.args[1]
196 msg = inst.args[1]
197 if isinstance(msg, type(u'')):
197 if isinstance(msg, type(u'')):
198 msg = pycompat.sysbytes(msg)
198 msg = pycompat.sysbytes(msg)
199 if not isinstance(msg, bytes):
199 if not isinstance(msg, bytes):
200 ui.error(" %r\n" % (msg,))
200 ui.error(" %r\n" % (msg,))
201 elif not msg:
201 elif not msg:
202 ui.error(_(" empty string\n"))
202 ui.error(_(" empty string\n"))
203 else:
203 else:
204 ui.error("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
204 ui.error("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
205 except error.CensoredNodeError as inst:
205 except error.CensoredNodeError as inst:
206 ui.error(_("abort: file censored %s!\n") % inst)
206 ui.error(_("abort: file censored %s!\n") % inst)
207 except error.RevlogError as inst:
207 except error.RevlogError as inst:
208 ui.error(_("abort: %s!\n") % inst)
208 ui.error(_("abort: %s!\n") % inst)
209 except error.InterventionRequired as inst:
209 except error.InterventionRequired as inst:
210 ui.error("%s\n" % inst)
210 ui.error("%s\n" % inst)
211 if inst.hint:
211 if inst.hint:
212 ui.error(_("(%s)\n") % inst.hint)
212 ui.error(_("(%s)\n") % inst.hint)
213 return 1
213 return 1
214 except error.WdirUnsupported:
214 except error.WdirUnsupported:
215 ui.error(_("abort: working directory revision cannot be specified\n"))
215 ui.error(_("abort: working directory revision cannot be specified\n"))
216 except error.Abort as inst:
216 except error.Abort as inst:
217 ui.error(_("abort: %s\n") % inst)
217 ui.error(_("abort: %s\n") % inst)
218 if inst.hint:
218 if inst.hint:
219 ui.error(_("(%s)\n") % inst.hint)
219 ui.error(_("(%s)\n") % inst.hint)
220 except ImportError as inst:
220 except ImportError as inst:
221 ui.error(_("abort: %s!\n") % stringutil.forcebytestr(inst))
221 ui.error(_("abort: %s!\n") % stringutil.forcebytestr(inst))
222 m = stringutil.forcebytestr(inst).split()[-1]
222 m = stringutil.forcebytestr(inst).split()[-1]
223 if m in "mpatch bdiff".split():
223 if m in "mpatch bdiff".split():
224 ui.error(_("(did you forget to compile extensions?)\n"))
224 ui.error(_("(did you forget to compile extensions?)\n"))
225 elif m in "zlib".split():
225 elif m in "zlib".split():
226 ui.error(_("(is your Python install correct?)\n"))
226 ui.error(_("(is your Python install correct?)\n"))
227 except IOError as inst:
227 except IOError as inst:
228 if util.safehasattr(inst, "code"):
228 if util.safehasattr(inst, "code"):
229 ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst))
229 ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst))
230 elif util.safehasattr(inst, "reason"):
230 elif util.safehasattr(inst, "reason"):
231 try: # usually it is in the form (errno, strerror)
231 try: # usually it is in the form (errno, strerror)
232 reason = inst.reason.args[1]
232 reason = inst.reason.args[1]
233 except (AttributeError, IndexError):
233 except (AttributeError, IndexError):
234 # it might be anything, for example a string
234 # it might be anything, for example a string
235 reason = inst.reason
235 reason = inst.reason
236 if isinstance(reason, pycompat.unicode):
236 if isinstance(reason, pycompat.unicode):
237 # SSLError of Python 2.7.9 contains a unicode
237 # SSLError of Python 2.7.9 contains a unicode
238 reason = encoding.unitolocal(reason)
238 reason = encoding.unitolocal(reason)
239 ui.error(_("abort: error: %s\n") % reason)
239 ui.error(_("abort: error: %s\n") % reason)
240 elif (util.safehasattr(inst, "args")
240 elif (util.safehasattr(inst, "args")
241 and inst.args and inst.args[0] == errno.EPIPE):
241 and inst.args and inst.args[0] == errno.EPIPE):
242 pass
242 pass
243 elif getattr(inst, "strerror", None):
243 elif getattr(inst, "strerror", None):
244 if getattr(inst, "filename", None):
244 if getattr(inst, "filename", None):
245 ui.error(_("abort: %s: %s\n") % (
245 ui.error(_("abort: %s: %s\n") % (
246 encoding.strtolocal(inst.strerror),
246 encoding.strtolocal(inst.strerror),
247 stringutil.forcebytestr(inst.filename)))
247 stringutil.forcebytestr(inst.filename)))
248 else:
248 else:
249 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
249 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
250 else:
250 else:
251 raise
251 raise
252 except OSError as inst:
252 except OSError as inst:
253 if getattr(inst, "filename", None) is not None:
253 if getattr(inst, "filename", None) is not None:
254 ui.error(_("abort: %s: '%s'\n") % (
254 ui.error(_("abort: %s: '%s'\n") % (
255 encoding.strtolocal(inst.strerror),
255 encoding.strtolocal(inst.strerror),
256 stringutil.forcebytestr(inst.filename)))
256 stringutil.forcebytestr(inst.filename)))
257 else:
257 else:
258 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
258 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
259 except MemoryError:
259 except MemoryError:
260 ui.error(_("abort: out of memory\n"))
260 ui.error(_("abort: out of memory\n"))
261 except SystemExit as inst:
261 except SystemExit as inst:
262 # Commands shouldn't sys.exit directly, but give a return code.
262 # Commands shouldn't sys.exit directly, but give a return code.
263 # Just in case catch this and and pass exit code to caller.
263 # Just in case catch this and and pass exit code to caller.
264 return inst.code
264 return inst.code
265 except socket.error as inst:
265 except socket.error as inst:
266 ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))
266 ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))
267
267
268 return -1
268 return -1
269
269
270 def checknewlabel(repo, lbl, kind):
270 def checknewlabel(repo, lbl, kind):
271 # Do not use the "kind" parameter in ui output.
271 # Do not use the "kind" parameter in ui output.
272 # It makes strings difficult to translate.
272 # It makes strings difficult to translate.
273 if lbl in ['tip', '.', 'null']:
273 if lbl in ['tip', '.', 'null']:
274 raise error.Abort(_("the name '%s' is reserved") % lbl)
274 raise error.Abort(_("the name '%s' is reserved") % lbl)
275 for c in (':', '\0', '\n', '\r'):
275 for c in (':', '\0', '\n', '\r'):
276 if c in lbl:
276 if c in lbl:
277 raise error.Abort(
277 raise error.Abort(
278 _("%r cannot be used in a name") % pycompat.bytestr(c))
278 _("%r cannot be used in a name") % pycompat.bytestr(c))
279 try:
279 try:
280 int(lbl)
280 int(lbl)
281 raise error.Abort(_("cannot use an integer as a name"))
281 raise error.Abort(_("cannot use an integer as a name"))
282 except ValueError:
282 except ValueError:
283 pass
283 pass
284 if lbl.strip() != lbl:
284 if lbl.strip() != lbl:
285 raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
285 raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
286
286
287 def checkfilename(f):
287 def checkfilename(f):
288 '''Check that the filename f is an acceptable filename for a tracked file'''
288 '''Check that the filename f is an acceptable filename for a tracked file'''
289 if '\r' in f or '\n' in f:
289 if '\r' in f or '\n' in f:
290 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
290 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
291 % pycompat.bytestr(f))
291 % pycompat.bytestr(f))
292
292
293 def checkportable(ui, f):
293 def checkportable(ui, f):
294 '''Check if filename f is portable and warn or abort depending on config'''
294 '''Check if filename f is portable and warn or abort depending on config'''
295 checkfilename(f)
295 checkfilename(f)
296 abort, warn = checkportabilityalert(ui)
296 abort, warn = checkportabilityalert(ui)
297 if abort or warn:
297 if abort or warn:
298 msg = util.checkwinfilename(f)
298 msg = util.checkwinfilename(f)
299 if msg:
299 if msg:
300 msg = "%s: %s" % (msg, procutil.shellquote(f))
300 msg = "%s: %s" % (msg, procutil.shellquote(f))
301 if abort:
301 if abort:
302 raise error.Abort(msg)
302 raise error.Abort(msg)
303 ui.warn(_("warning: %s\n") % msg)
303 ui.warn(_("warning: %s\n") % msg)
304
304
305 def checkportabilityalert(ui):
305 def checkportabilityalert(ui):
306 '''check if the user's config requests nothing, a warning, or abort for
306 '''check if the user's config requests nothing, a warning, or abort for
307 non-portable filenames'''
307 non-portable filenames'''
308 val = ui.config('ui', 'portablefilenames')
308 val = ui.config('ui', 'portablefilenames')
309 lval = val.lower()
309 lval = val.lower()
310 bval = stringutil.parsebool(val)
310 bval = stringutil.parsebool(val)
311 abort = pycompat.iswindows or lval == 'abort'
311 abort = pycompat.iswindows or lval == 'abort'
312 warn = bval or lval == 'warn'
312 warn = bval or lval == 'warn'
313 if bval is None and not (warn or abort or lval == 'ignore'):
313 if bval is None and not (warn or abort or lval == 'ignore'):
314 raise error.ConfigError(
314 raise error.ConfigError(
315 _("ui.portablefilenames value is invalid ('%s')") % val)
315 _("ui.portablefilenames value is invalid ('%s')") % val)
316 return abort, warn
316 return abort, warn
317
317
318 class casecollisionauditor(object):
318 class casecollisionauditor(object):
319 def __init__(self, ui, abort, dirstate):
319 def __init__(self, ui, abort, dirstate):
320 self._ui = ui
320 self._ui = ui
321 self._abort = abort
321 self._abort = abort
322 allfiles = '\0'.join(dirstate._map)
322 allfiles = '\0'.join(dirstate._map)
323 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
323 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
324 self._dirstate = dirstate
324 self._dirstate = dirstate
325 # The purpose of _newfiles is so that we don't complain about
325 # The purpose of _newfiles is so that we don't complain about
326 # case collisions if someone were to call this object with the
326 # case collisions if someone were to call this object with the
327 # same filename twice.
327 # same filename twice.
328 self._newfiles = set()
328 self._newfiles = set()
329
329
330 def __call__(self, f):
330 def __call__(self, f):
331 if f in self._newfiles:
331 if f in self._newfiles:
332 return
332 return
333 fl = encoding.lower(f)
333 fl = encoding.lower(f)
334 if fl in self._loweredfiles and f not in self._dirstate:
334 if fl in self._loweredfiles and f not in self._dirstate:
335 msg = _('possible case-folding collision for %s') % f
335 msg = _('possible case-folding collision for %s') % f
336 if self._abort:
336 if self._abort:
337 raise error.Abort(msg)
337 raise error.Abort(msg)
338 self._ui.warn(_("warning: %s\n") % msg)
338 self._ui.warn(_("warning: %s\n") % msg)
339 self._loweredfiles.add(fl)
339 self._loweredfiles.add(fl)
340 self._newfiles.add(f)
340 self._newfiles.add(f)
341
341
342 def filteredhash(repo, maxrev):
342 def filteredhash(repo, maxrev):
343 """build hash of filtered revisions in the current repoview.
343 """build hash of filtered revisions in the current repoview.
344
344
345 Multiple caches perform up-to-date validation by checking that the
345 Multiple caches perform up-to-date validation by checking that the
346 tiprev and tipnode stored in the cache file match the current repository.
346 tiprev and tipnode stored in the cache file match the current repository.
347 However, this is not sufficient for validating repoviews because the set
347 However, this is not sufficient for validating repoviews because the set
348 of revisions in the view may change without the repository tiprev and
348 of revisions in the view may change without the repository tiprev and
349 tipnode changing.
349 tipnode changing.
350
350
351 This function hashes all the revs filtered from the view and returns
351 This function hashes all the revs filtered from the view and returns
352 that SHA-1 digest.
352 that SHA-1 digest.
353 """
353 """
354 cl = repo.changelog
354 cl = repo.changelog
355 if not cl.filteredrevs:
355 if not cl.filteredrevs:
356 return None
356 return None
357 key = None
357 key = None
358 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
358 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
359 if revs:
359 if revs:
360 s = hashlib.sha1()
360 s = hashlib.sha1()
361 for rev in revs:
361 for rev in revs:
362 s.update('%d;' % rev)
362 s.update('%d;' % rev)
363 key = s.digest()
363 key = s.digest()
364 return key
364 return key
365
365
366 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
366 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
367 '''yield every hg repository under path, always recursively.
367 '''yield every hg repository under path, always recursively.
368 The recurse flag will only control recursion into repo working dirs'''
368 The recurse flag will only control recursion into repo working dirs'''
369 def errhandler(err):
369 def errhandler(err):
370 if err.filename == path:
370 if err.filename == path:
371 raise err
371 raise err
372 samestat = getattr(os.path, 'samestat', None)
372 samestat = getattr(os.path, 'samestat', None)
373 if followsym and samestat is not None:
373 if followsym and samestat is not None:
374 def adddir(dirlst, dirname):
374 def adddir(dirlst, dirname):
375 dirstat = os.stat(dirname)
375 dirstat = os.stat(dirname)
376 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
376 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
377 if not match:
377 if not match:
378 dirlst.append(dirstat)
378 dirlst.append(dirstat)
379 return not match
379 return not match
380 else:
380 else:
381 followsym = False
381 followsym = False
382
382
383 if (seen_dirs is None) and followsym:
383 if (seen_dirs is None) and followsym:
384 seen_dirs = []
384 seen_dirs = []
385 adddir(seen_dirs, path)
385 adddir(seen_dirs, path)
386 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
386 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
387 dirs.sort()
387 dirs.sort()
388 if '.hg' in dirs:
388 if '.hg' in dirs:
389 yield root # found a repository
389 yield root # found a repository
390 qroot = os.path.join(root, '.hg', 'patches')
390 qroot = os.path.join(root, '.hg', 'patches')
391 if os.path.isdir(os.path.join(qroot, '.hg')):
391 if os.path.isdir(os.path.join(qroot, '.hg')):
392 yield qroot # we have a patch queue repo here
392 yield qroot # we have a patch queue repo here
393 if recurse:
393 if recurse:
394 # avoid recursing inside the .hg directory
394 # avoid recursing inside the .hg directory
395 dirs.remove('.hg')
395 dirs.remove('.hg')
396 else:
396 else:
397 dirs[:] = [] # don't descend further
397 dirs[:] = [] # don't descend further
398 elif followsym:
398 elif followsym:
399 newdirs = []
399 newdirs = []
400 for d in dirs:
400 for d in dirs:
401 fname = os.path.join(root, d)
401 fname = os.path.join(root, d)
402 if adddir(seen_dirs, fname):
402 if adddir(seen_dirs, fname):
403 if os.path.islink(fname):
403 if os.path.islink(fname):
404 for hgname in walkrepos(fname, True, seen_dirs):
404 for hgname in walkrepos(fname, True, seen_dirs):
405 yield hgname
405 yield hgname
406 else:
406 else:
407 newdirs.append(d)
407 newdirs.append(d)
408 dirs[:] = newdirs
408 dirs[:] = newdirs
409
409
410 def binnode(ctx):
410 def binnode(ctx):
411 """Return binary node id for a given basectx"""
411 """Return binary node id for a given basectx"""
412 node = ctx.node()
412 node = ctx.node()
413 if node is None:
413 if node is None:
414 return wdirid
414 return wdirid
415 return node
415 return node
416
416
417 def intrev(ctx):
417 def intrev(ctx):
418 """Return integer for a given basectx that can be used in comparison or
418 """Return integer for a given basectx that can be used in comparison or
419 arithmetic operation"""
419 arithmetic operation"""
420 rev = ctx.rev()
420 rev = ctx.rev()
421 if rev is None:
421 if rev is None:
422 return wdirrev
422 return wdirrev
423 return rev
423 return rev
424
424
425 def formatchangeid(ctx):
425 def formatchangeid(ctx):
426 """Format changectx as '{rev}:{node|formatnode}', which is the default
426 """Format changectx as '{rev}:{node|formatnode}', which is the default
427 template provided by logcmdutil.changesettemplater"""
427 template provided by logcmdutil.changesettemplater"""
428 repo = ctx.repo()
428 repo = ctx.repo()
429 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
429 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
430
430
431 def formatrevnode(ui, rev, node):
431 def formatrevnode(ui, rev, node):
432 """Format given revision and node depending on the current verbosity"""
432 """Format given revision and node depending on the current verbosity"""
433 if ui.debugflag:
433 if ui.debugflag:
434 hexfunc = hex
434 hexfunc = hex
435 else:
435 else:
436 hexfunc = short
436 hexfunc = short
437 return '%d:%s' % (rev, hexfunc(node))
437 return '%d:%s' % (rev, hexfunc(node))
438
438
439 def resolvehexnodeidprefix(repo, prefix):
439 def resolvehexnodeidprefix(repo, prefix):
440 try:
440 try:
441 # Uses unfiltered repo because it's faster when prefix is ambiguous/
441 # Uses unfiltered repo because it's faster when prefix is ambiguous/
442 # This matches the shortesthexnodeidprefix() function below.
442 # This matches the shortesthexnodeidprefix() function below.
443 node = repo.unfiltered().changelog._partialmatch(prefix)
443 node = repo.unfiltered().changelog._partialmatch(prefix)
444 except error.AmbiguousPrefixLookupError:
444 except error.AmbiguousPrefixLookupError:
445 revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
445 revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
446 if revset:
446 if revset:
447 # Clear config to avoid infinite recursion
447 # Clear config to avoid infinite recursion
448 configoverrides = {('experimental',
448 configoverrides = {('experimental',
449 'revisions.disambiguatewithin'): None}
449 'revisions.disambiguatewithin'): None}
450 with repo.ui.configoverride(configoverrides):
450 with repo.ui.configoverride(configoverrides):
451 revs = repo.anyrevs([revset], user=True)
451 revs = repo.anyrevs([revset], user=True)
452 matches = []
452 matches = []
453 for rev in revs:
453 for rev in revs:
454 node = repo.changelog.node(rev)
454 node = repo.changelog.node(rev)
455 if hex(node).startswith(prefix):
455 if hex(node).startswith(prefix):
456 matches.append(node)
456 matches.append(node)
457 if len(matches) == 1:
457 if len(matches) == 1:
458 return matches[0]
458 return matches[0]
459 raise
459 raise
460 if node is None:
460 if node is None:
461 return
461 return
462 repo.changelog.rev(node) # make sure node isn't filtered
462 repo.changelog.rev(node) # make sure node isn't filtered
463 return node
463 return node
464
464
465 def shortesthexnodeidprefix(repo, node, minlength=1):
465 def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
466 """Find the shortest unambiguous prefix that matches hexnode."""
466 """Find the shortest unambiguous prefix that matches hexnode.
467
468 If "cache" is not None, it must be a dictionary that can be used for
469 caching between calls to this method.
470 """
467 # _partialmatch() of filtered changelog could take O(len(repo)) time,
471 # _partialmatch() of filtered changelog could take O(len(repo)) time,
468 # which would be unacceptably slow. so we look for hash collision in
472 # which would be unacceptably slow. so we look for hash collision in
469 # unfiltered space, which means some hashes may be slightly longer.
473 # unfiltered space, which means some hashes may be slightly longer.
470 cl = repo.unfiltered().changelog
474 cl = repo.unfiltered().changelog
471
475
472 def isrev(prefix):
476 def isrev(prefix):
473 try:
477 try:
474 i = int(prefix)
478 i = int(prefix)
475 # if we are a pure int, then starting with zero will not be
479 # if we are a pure int, then starting with zero will not be
476 # confused as a rev; or, obviously, if the int is larger
480 # confused as a rev; or, obviously, if the int is larger
477 # than the value of the tip rev
481 # than the value of the tip rev
478 if prefix[0:1] == b'0' or i > len(cl):
482 if prefix[0:1] == b'0' or i > len(cl):
479 return False
483 return False
480 return True
484 return True
481 except ValueError:
485 except ValueError:
482 return False
486 return False
483
487
484 def disambiguate(prefix):
488 def disambiguate(prefix):
485 """Disambiguate against revnums."""
489 """Disambiguate against revnums."""
486 hexnode = hex(node)
490 hexnode = hex(node)
487 for length in range(len(prefix), len(hexnode) + 1):
491 for length in range(len(prefix), len(hexnode) + 1):
488 prefix = hexnode[:length]
492 prefix = hexnode[:length]
489 if not isrev(prefix):
493 if not isrev(prefix):
490 return prefix
494 return prefix
491
495
492 revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
496 revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
493 if revset:
497 if revset:
494 revs = repo.anyrevs([revset], user=True)
498 revs = None
499 if cache is not None:
500 revs = cache.get('disambiguationrevset')
501 if revs is None:
502 revs = repo.anyrevs([revset], user=True)
503 if cache is not None:
504 cache['disambiguationrevset'] = revs
495 if cl.rev(node) in revs:
505 if cl.rev(node) in revs:
496 hexnode = hex(node)
506 hexnode = hex(node)
497 for length in range(minlength, len(hexnode) + 1):
507 for length in range(minlength, len(hexnode) + 1):
498 matches = []
508 matches = []
499 prefix = hexnode[:length]
509 prefix = hexnode[:length]
500 for rev in revs:
510 for rev in revs:
501 otherhexnode = repo[rev].hex()
511 otherhexnode = repo[rev].hex()
502 if prefix == otherhexnode[:length]:
512 if prefix == otherhexnode[:length]:
503 matches.append(otherhexnode)
513 matches.append(otherhexnode)
504 if len(matches) == 1:
514 if len(matches) == 1:
505 return disambiguate(prefix)
515 return disambiguate(prefix)
506
516
507 try:
517 try:
508 return disambiguate(cl.shortest(node, minlength))
518 return disambiguate(cl.shortest(node, minlength))
509 except error.LookupError:
519 except error.LookupError:
510 raise error.RepoLookupError()
520 raise error.RepoLookupError()
511
521
def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
    symbol is an ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
    except error.RepoLookupError:
        # Unknown symbol: not present in this repo.
        return False
    # Any other exception (e.g. ambiguous prefix) propagates to the caller.
    return True
523
533
def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        msg = ("symbol (%s of type %s) was not a string, did you mean "
               "repo[symbol]?" % (symbol, type(symbol)))
        raise error.ProgrammingError(msg)
    try:
        # Fast path for the most common special symbols.
        if symbol in ('.', 'tip', 'null'):
            return repo[symbol]

        # Try interpreting the symbol as a plain revision number.
        try:
            r = int(symbol)
            # Reject representations that don't round-trip (e.g. "+1",
            # "01", " 1") so they can fall through to name lookup.
            if '%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                # Negative numbers count back from tip.
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            # A filtered (e.g. hidden) revnum is a real answer; re-raise so
            # the outer handler can build a helpful message.
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        # Try interpreting the symbol as a full 40-hex-digit node id.
        if len(symbol) == 40:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        # Finally, try the symbol as a hex node id prefix.
        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_("unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        # The symbol denoted the working directory; return its context.
        return repo[None]
    except (error.FilteredIndexError, error.FilteredLookupError,
            error.FilteredRepoLookupError):
        # Translate "exists but filtered" into a user-friendly error.
        raise _filterederror(repo, symbol)
584
594
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith('visible'):

        # Check if the changeset is obsolete
        unfilteredrepo = repo.unfiltered()
        ctx = revsymbol(unfilteredrepo, changeid)

        # If the changeset is obsolete, enrich the message with the reason
        # that made this changeset not visible
        if ctx.obsolete():
            msg = obsutil._getfilteredreason(repo, changeid, ctx)
        else:
            msg = _("hidden revision '%s'") % changeid

        hint = _('use --hidden to access hidden revisions')

        return error.FilteredRepoLookupError(msg, hint=hint)
    # Non-"visible" filters (e.g. "served") get a generic message naming
    # the filter subset the revision fell outside of.
    msg = _("filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)
609
619
def revsingle(repo, revspec, default='.', localalias=None):
    """Return the context for the last revision matched by revspec.

    Falls back to ``default`` when revspec is empty (but 0 is a valid
    revision). Aborts when the revset matches nothing.
    """
    if not revspec and revspec != 0:
        return repo[default]

    matched = revrange(repo, [revspec], localalias=localalias)
    if not matched:
        raise error.Abort(_('empty revision set'))
    return repo[matched.last()]
618
628
def _pairspec(revspec):
    """Report whether revspec parses to a top-level range expression."""
    parsed = revsetlang.parse(revspec)
    if not parsed:
        return parsed
    return parsed[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
622
632
def revpair(repo, revs):
    """Resolve a list of revset specs into a pair of contexts.

    With no specs, returns (working copy parent, working copy context).
    Otherwise the pair is taken from the extremes of the combined revset
    result; a single non-range spec yields (rev, working copy).
    """
    if not revs:
        return repo['.'], repo[None]

    l = revrange(repo, revs)

    # Pick the endpoints cheaply when the smartset knows its ordering;
    # fall back to first()/last() for unordered sets.
    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        first = l.first()
        second = l.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    # first == second can mean one side of the range resolved to nothing;
    # re-evaluate each spec individually to detect that case.
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]
652
662
def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    # Integers are shorthand for an exact revision number.
    allspecs = [revsetlang.formatspec('rev(%d)', spec)
                if isinstance(spec, int) else spec
                for spec in specs]
    return repo.anyrevs(allspecs, user=True, localalias=localalias)
680
690
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        # A real merge: both parents are always meaningful.
        return parents
    if repo.ui.debugflag:
        # In debug mode, always show both slots (padding with null).
        return [parents[0], repo['null']]
    p1 = parents[0]
    if p1.rev() >= intrev(ctx) - 1:
        # Parent is the immediately preceding revision: not meaningful.
        return []
    return parents
696
706
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it already has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # An explicit pattern kind (e.g. "re:") is never globbed.
            expanded.append(kindpat)
            continue
        try:
            globbed = glob.glob(pat)
        except re.error:
            globbed = [pat]
        if globbed:
            expanded.extend(globbed)
        else:
            # No match on disk: keep the original pattern verbatim.
            expanded.append(kindpat)
    return expanded
715
725
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    def bad(f, msg):
        # Default bad-file handler: warn on the repo UI.
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'),
                  badfn=bad if badfn is None else badfn)

    if m.always():
        # A match-everything matcher carries no interesting patterns.
        pats = []
    return m, pats
740
750
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    m, discarded = matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)
    return m
745
755
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    root = repo.root
    cwd = repo.getcwd()
    return matchmod.always(root, cwd)
749
759
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    root = repo.root
    cwd = repo.getcwd()
    return matchmod.exact(root, cwd, files, badfn=badfn)
753
763
def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.
    """
    if not matchmod.patkind(pat):
        # A plain path: just canonicalize it relative to the repo root.
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    # A real pattern: it must match exactly one file in the revision.
    ctx = repo[rev]
    m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
    files = [f for f in ctx if m(f)]
    if len(files) != 1:
        raise error.ParseError(msg)
    return files[0]
767
777
def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified
    '''
    origbackuppath = ui.config('ui', 'origbackuppath')
    if not origbackuppath:
        return filepath + ".orig"

    # Convert filepath from an absolute path into a path inside the repo.
    filepathfromroot = util.normpath(os.path.relpath(filepath,
                                                     start=repo.root))

    origvfs = vfs.vfs(repo.wjoin(origbackuppath))
    origbackupdir = origvfs.dirname(filepathfromroot)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        for f in reversed(list(util.finddirs(filepathfromroot))):
            if origvfs.isfileorlink(f):
                ui.note(_('removing conflicting file: %s\n')
                        % origvfs.join(f))
                origvfs.unlink(f)
                # Only the shallowest conflicting ancestor needs removal;
                # deeper paths can't exist below a regular file.
                break

        origvfs.makedirs(origbackupdir)

    # A directory in the way of the backup file itself is also removed.
    if origvfs.isdir(filepathfromroot) and not origvfs.islink(filepathfromroot):
        ui.note(_('removing conflicting directory: %s\n')
                % origvfs.join(filepathfromroot))
        origvfs.rmtree(filepathfromroot, forcibly=True)

    return origvfs.join(filepathfromroot)
803
813
class _containsnode(object):
    """proxy __contains__(node) to container.__contains__ which accepts revs"""

    def __init__(self, repo, revcontainer):
        # Bind the conversion and membership callables up front so each
        # containment test is two direct calls.
        self._torev = repo.changelog.rev
        self._revcontains = revcontainer.__contains__

    def __contains__(self, node):
        rev = self._torev(node)
        return self._revcontains(rev)
813
823
def cleanupnodes(repo, replacements, operation, moves=None, metadata=None,
                 fixphase=False, targetphase=None, backup=True):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or a iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is dictionary containing metadata to be stored in obsmarker if
    obsolescence is enabled.
    """
    # targetphase only makes sense when phase fixing was requested.
    assert fixphase or targetphase is None
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, 'items'):
        replacements = {n: () for n in replacements}

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()
    for oldnode, newnodes in replacements.items():
        if oldnode in moves:
            # Explicit move provided by the caller takes precedence.
            continue
        if len(newnodes) > 1:
            # usually a split, take the one with biggest rev number
            newnode = next(unfi.set('max(%ln)', newnodes)).node()
        elif len(newnodes) == 0:
            # move bookmark backwards
            roots = list(unfi.set('max((::%n) - %ln)', oldnode,
                                  list(replacements)))
            if roots:
                newnode = roots[0].node()
            else:
                newnode = nullid
        else:
            newnode = newnodes[0]
        moves[oldnode] = newnode

    allnewnodes = [n for ns in replacements.values() for n in ns]
    toretract = {}
    toadvance = {}
    if fixphase:
        # Map each new node back to the old node(s) it replaces.
        precursors = {}
        for oldnode, newnodes in replacements.items():
            for newnode in newnodes:
                precursors.setdefault(newnode, []).append(oldnode)

        # Process in revision order so a node's parents are phased first.
        allnewnodes.sort(key=lambda n: unfi[n].rev())
        newphases = {}
        def phase(ctx):
            # Prefer the pending phase over the currently recorded one.
            return newphases.get(ctx.node(), ctx.phase())
        for newnode in allnewnodes:
            ctx = unfi[newnode]
            # A child can never be in an earlier phase than its parents.
            parentphase = max(phase(p) for p in ctx.parents())
            if targetphase is None:
                oldphase = max(unfi[oldnode].phase()
                               for oldnode in precursors[newnode])
                newphase = max(oldphase, parentphase)
            else:
                newphase = max(targetphase, parentphase)
            newphases[newnode] = newphase
            if newphase > ctx.phase():
                toretract.setdefault(newphase, []).append(newnode)
            elif newphase < ctx.phase():
                toadvance.setdefault(newphase, []).append(newnode)

    with repo.transaction('cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks # avoid import cycle
            repo.ui.debug('moving bookmarks %r from %s to %s\n' %
                          (pycompat.rapply(pycompat.maybebytestr, oldbmarks),
                           hex(oldnode), hex(newnode)))
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
                                   allnewnodes, newnode, oldnode)
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        # Apply the phase changes computed above, all within the transaction.
        for phase, nodes in toretract.items():
            phases.retractboundary(repo, tr, phase, nodes)
        for phase, nodes in toadvance.items():
            phases.advanceboundary(repo, tr, phase, nodes)

        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obssolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the node in topology order, that might be useful for
            # some obsstore logic.
            # NOTE: the filtering and sorting might belong to createmarkers.
            isobs = unfi.obsstore.successors.__contains__
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0])
            rels = [(unfi[n], tuple(unfi[m] for m in s))
                    for n, s in sorted(replacements.items(), key=sortfunc)
                    if s or not isobs(n)]
            if rels:
                obsolete.createmarkers(repo, rels, operation=operation,
                                       metadata=metadata)
        else:
            from . import repair # avoid import cycle
            tostrip = list(replacements)
            if tostrip:
                repair.delayedstrip(repo.ui, repo, tostrip, operation,
                                    backup=backup)
943 def addremove(repo, matcher, prefix, opts=None):
953 def addremove(repo, matcher, prefix, opts=None):
944 if opts is None:
954 if opts is None:
945 opts = {}
955 opts = {}
946 m = matcher
956 m = matcher
947 dry_run = opts.get('dry_run')
957 dry_run = opts.get('dry_run')
948 try:
958 try:
949 similarity = float(opts.get('similarity') or 0)
959 similarity = float(opts.get('similarity') or 0)
950 except ValueError:
960 except ValueError:
951 raise error.Abort(_('similarity must be a number'))
961 raise error.Abort(_('similarity must be a number'))
952 if similarity < 0 or similarity > 100:
962 if similarity < 0 or similarity > 100:
953 raise error.Abort(_('similarity must be between 0 and 100'))
963 raise error.Abort(_('similarity must be between 0 and 100'))
954 similarity /= 100.0
964 similarity /= 100.0
955
965
956 ret = 0
966 ret = 0
957 join = lambda f: os.path.join(prefix, f)
967 join = lambda f: os.path.join(prefix, f)
958
968
959 wctx = repo[None]
969 wctx = repo[None]
960 for subpath in sorted(wctx.substate):
970 for subpath in sorted(wctx.substate):
961 submatch = matchmod.subdirmatcher(subpath, m)
971 submatch = matchmod.subdirmatcher(subpath, m)
962 if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
972 if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
963 sub = wctx.sub(subpath)
973 sub = wctx.sub(subpath)
964 try:
974 try:
965 if sub.addremove(submatch, prefix, opts):
975 if sub.addremove(submatch, prefix, opts):
966 ret = 1
976 ret = 1
967 except error.LookupError:
977 except error.LookupError:
968 repo.ui.status(_("skipping missing subrepository: %s\n")
978 repo.ui.status(_("skipping missing subrepository: %s\n")
969 % join(subpath))
979 % join(subpath))
970
980
971 rejected = []
981 rejected = []
972 def badfn(f, msg):
982 def badfn(f, msg):
973 if f in m.files():
983 if f in m.files():
974 m.bad(f, msg)
984 m.bad(f, msg)
975 rejected.append(f)
985 rejected.append(f)
976
986
977 badmatch = matchmod.badmatch(m, badfn)
987 badmatch = matchmod.badmatch(m, badfn)
978 added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
988 added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
979 badmatch)
989 badmatch)
980
990
981 unknownset = set(unknown + forgotten)
991 unknownset = set(unknown + forgotten)
982 toprint = unknownset.copy()
992 toprint = unknownset.copy()
983 toprint.update(deleted)
993 toprint.update(deleted)
984 for abs in sorted(toprint):
994 for abs in sorted(toprint):
985 if repo.ui.verbose or not m.exact(abs):
995 if repo.ui.verbose or not m.exact(abs):
986 if abs in unknownset:
996 if abs in unknownset:
987 status = _('adding %s\n') % m.uipath(abs)
997 status = _('adding %s\n') % m.uipath(abs)
988 else:
998 else:
989 status = _('removing %s\n') % m.uipath(abs)
999 status = _('removing %s\n') % m.uipath(abs)
990 repo.ui.status(status)
1000 repo.ui.status(status)
991
1001
992 renames = _findrenames(repo, m, added + unknown, removed + deleted,
1002 renames = _findrenames(repo, m, added + unknown, removed + deleted,
993 similarity)
1003 similarity)
994
1004
995 if not dry_run:
1005 if not dry_run:
996 _markchanges(repo, unknown + forgotten, deleted, renames)
1006 _markchanges(repo, unknown + forgotten, deleted, renames)
997
1007
998 for f in rejected:
1008 for f in rejected:
999 if f in m.files():
1009 if f in m.files():
1000 return 1
1010 return 1
1001 return ret
1011 return ret
1002
1012
1003 def marktouched(repo, files, similarity=0.0):
1013 def marktouched(repo, files, similarity=0.0):
1004 '''Assert that files have somehow been operated upon. files are relative to
1014 '''Assert that files have somehow been operated upon. files are relative to
1005 the repo root.'''
1015 the repo root.'''
1006 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
1016 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
1007 rejected = []
1017 rejected = []
1008
1018
1009 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
1019 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
1010
1020
1011 if repo.ui.verbose:
1021 if repo.ui.verbose:
1012 unknownset = set(unknown + forgotten)
1022 unknownset = set(unknown + forgotten)
1013 toprint = unknownset.copy()
1023 toprint = unknownset.copy()
1014 toprint.update(deleted)
1024 toprint.update(deleted)
1015 for abs in sorted(toprint):
1025 for abs in sorted(toprint):
1016 if abs in unknownset:
1026 if abs in unknownset:
1017 status = _('adding %s\n') % abs
1027 status = _('adding %s\n') % abs
1018 else:
1028 else:
1019 status = _('removing %s\n') % abs
1029 status = _('removing %s\n') % abs
1020 repo.ui.status(status)
1030 repo.ui.status(status)
1021
1031
1022 renames = _findrenames(repo, m, added + unknown, removed + deleted,
1032 renames = _findrenames(repo, m, added + unknown, removed + deleted,
1023 similarity)
1033 similarity)
1024
1034
1025 _markchanges(repo, unknown + forgotten, deleted, renames)
1035 _markchanges(repo, unknown + forgotten, deleted, renames)
1026
1036
1027 for f in rejected:
1037 for f in rejected:
1028 if f in m.files():
1038 if f in m.files():
1029 return 1
1039 return 1
1030 return 0
1040 return 0
1031
1041
1032 def _interestingfiles(repo, matcher):
1042 def _interestingfiles(repo, matcher):
1033 '''Walk dirstate with matcher, looking for files that addremove would care
1043 '''Walk dirstate with matcher, looking for files that addremove would care
1034 about.
1044 about.
1035
1045
1036 This is different from dirstate.status because it doesn't care about
1046 This is different from dirstate.status because it doesn't care about
1037 whether files are modified or clean.'''
1047 whether files are modified or clean.'''
1038 added, unknown, deleted, removed, forgotten = [], [], [], [], []
1048 added, unknown, deleted, removed, forgotten = [], [], [], [], []
1039 audit_path = pathutil.pathauditor(repo.root, cached=True)
1049 audit_path = pathutil.pathauditor(repo.root, cached=True)
1040
1050
1041 ctx = repo[None]
1051 ctx = repo[None]
1042 dirstate = repo.dirstate
1052 dirstate = repo.dirstate
1043 walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
1053 walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
1044 unknown=True, ignored=False, full=False)
1054 unknown=True, ignored=False, full=False)
1045 for abs, st in walkresults.iteritems():
1055 for abs, st in walkresults.iteritems():
1046 dstate = dirstate[abs]
1056 dstate = dirstate[abs]
1047 if dstate == '?' and audit_path.check(abs):
1057 if dstate == '?' and audit_path.check(abs):
1048 unknown.append(abs)
1058 unknown.append(abs)
1049 elif dstate != 'r' and not st:
1059 elif dstate != 'r' and not st:
1050 deleted.append(abs)
1060 deleted.append(abs)
1051 elif dstate == 'r' and st:
1061 elif dstate == 'r' and st:
1052 forgotten.append(abs)
1062 forgotten.append(abs)
1053 # for finding renames
1063 # for finding renames
1054 elif dstate == 'r' and not st:
1064 elif dstate == 'r' and not st:
1055 removed.append(abs)
1065 removed.append(abs)
1056 elif dstate == 'a':
1066 elif dstate == 'a':
1057 added.append(abs)
1067 added.append(abs)
1058
1068
1059 return added, unknown, deleted, removed, forgotten
1069 return added, unknown, deleted, removed, forgotten
1060
1070
1061 def _findrenames(repo, matcher, added, removed, similarity):
1071 def _findrenames(repo, matcher, added, removed, similarity):
1062 '''Find renames from removed files to added ones.'''
1072 '''Find renames from removed files to added ones.'''
1063 renames = {}
1073 renames = {}
1064 if similarity > 0:
1074 if similarity > 0:
1065 for old, new, score in similar.findrenames(repo, added, removed,
1075 for old, new, score in similar.findrenames(repo, added, removed,
1066 similarity):
1076 similarity):
1067 if (repo.ui.verbose or not matcher.exact(old)
1077 if (repo.ui.verbose or not matcher.exact(old)
1068 or not matcher.exact(new)):
1078 or not matcher.exact(new)):
1069 repo.ui.status(_('recording removal of %s as rename to %s '
1079 repo.ui.status(_('recording removal of %s as rename to %s '
1070 '(%d%% similar)\n') %
1080 '(%d%% similar)\n') %
1071 (matcher.rel(old), matcher.rel(new),
1081 (matcher.rel(old), matcher.rel(new),
1072 score * 100))
1082 score * 100))
1073 renames[new] = old
1083 renames[new] = old
1074 return renames
1084 return renames
1075
1085
1076 def _markchanges(repo, unknown, deleted, renames):
1086 def _markchanges(repo, unknown, deleted, renames):
1077 '''Marks the files in unknown as added, the files in deleted as removed,
1087 '''Marks the files in unknown as added, the files in deleted as removed,
1078 and the files in renames as copied.'''
1088 and the files in renames as copied.'''
1079 wctx = repo[None]
1089 wctx = repo[None]
1080 with repo.wlock():
1090 with repo.wlock():
1081 wctx.forget(deleted)
1091 wctx.forget(deleted)
1082 wctx.add(unknown)
1092 wctx.add(unknown)
1083 for new, old in renames.iteritems():
1093 for new, old in renames.iteritems():
1084 wctx.copy(old, new)
1094 wctx.copy(old, new)
1085
1095
1086 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1096 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1087 """Update the dirstate to reflect the intent of copying src to dst. For
1097 """Update the dirstate to reflect the intent of copying src to dst. For
1088 different reasons it might not end with dst being marked as copied from src.
1098 different reasons it might not end with dst being marked as copied from src.
1089 """
1099 """
1090 origsrc = repo.dirstate.copied(src) or src
1100 origsrc = repo.dirstate.copied(src) or src
1091 if dst == origsrc: # copying back a copy?
1101 if dst == origsrc: # copying back a copy?
1092 if repo.dirstate[dst] not in 'mn' and not dryrun:
1102 if repo.dirstate[dst] not in 'mn' and not dryrun:
1093 repo.dirstate.normallookup(dst)
1103 repo.dirstate.normallookup(dst)
1094 else:
1104 else:
1095 if repo.dirstate[origsrc] == 'a' and origsrc == src:
1105 if repo.dirstate[origsrc] == 'a' and origsrc == src:
1096 if not ui.quiet:
1106 if not ui.quiet:
1097 ui.warn(_("%s has not been committed yet, so no copy "
1107 ui.warn(_("%s has not been committed yet, so no copy "
1098 "data will be stored for %s.\n")
1108 "data will be stored for %s.\n")
1099 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
1109 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
1100 if repo.dirstate[dst] in '?r' and not dryrun:
1110 if repo.dirstate[dst] in '?r' and not dryrun:
1101 wctx.add([dst])
1111 wctx.add([dst])
1102 elif not dryrun:
1112 elif not dryrun:
1103 wctx.copy(origsrc, dst)
1113 wctx.copy(origsrc, dst)
1104
1114
1105 def readrequires(opener, supported):
1115 def readrequires(opener, supported):
1106 '''Reads and parses .hg/requires and checks if all entries found
1116 '''Reads and parses .hg/requires and checks if all entries found
1107 are in the list of supported features.'''
1117 are in the list of supported features.'''
1108 requirements = set(opener.read("requires").splitlines())
1118 requirements = set(opener.read("requires").splitlines())
1109 missings = []
1119 missings = []
1110 for r in requirements:
1120 for r in requirements:
1111 if r not in supported:
1121 if r not in supported:
1112 if not r or not r[0:1].isalnum():
1122 if not r or not r[0:1].isalnum():
1113 raise error.RequirementError(_(".hg/requires file is corrupt"))
1123 raise error.RequirementError(_(".hg/requires file is corrupt"))
1114 missings.append(r)
1124 missings.append(r)
1115 missings.sort()
1125 missings.sort()
1116 if missings:
1126 if missings:
1117 raise error.RequirementError(
1127 raise error.RequirementError(
1118 _("repository requires features unknown to this Mercurial: %s")
1128 _("repository requires features unknown to this Mercurial: %s")
1119 % " ".join(missings),
1129 % " ".join(missings),
1120 hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
1130 hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
1121 " for more information"))
1131 " for more information"))
1122 return requirements
1132 return requirements
1123
1133
1124 def writerequires(opener, requirements):
1134 def writerequires(opener, requirements):
1125 with opener('requires', 'w') as fp:
1135 with opener('requires', 'w') as fp:
1126 for r in sorted(requirements):
1136 for r in sorted(requirements):
1127 fp.write("%s\n" % r)
1137 fp.write("%s\n" % r)
1128
1138
1129 class filecachesubentry(object):
1139 class filecachesubentry(object):
1130 def __init__(self, path, stat):
1140 def __init__(self, path, stat):
1131 self.path = path
1141 self.path = path
1132 self.cachestat = None
1142 self.cachestat = None
1133 self._cacheable = None
1143 self._cacheable = None
1134
1144
1135 if stat:
1145 if stat:
1136 self.cachestat = filecachesubentry.stat(self.path)
1146 self.cachestat = filecachesubentry.stat(self.path)
1137
1147
1138 if self.cachestat:
1148 if self.cachestat:
1139 self._cacheable = self.cachestat.cacheable()
1149 self._cacheable = self.cachestat.cacheable()
1140 else:
1150 else:
1141 # None means we don't know yet
1151 # None means we don't know yet
1142 self._cacheable = None
1152 self._cacheable = None
1143
1153
1144 def refresh(self):
1154 def refresh(self):
1145 if self.cacheable():
1155 if self.cacheable():
1146 self.cachestat = filecachesubentry.stat(self.path)
1156 self.cachestat = filecachesubentry.stat(self.path)
1147
1157
1148 def cacheable(self):
1158 def cacheable(self):
1149 if self._cacheable is not None:
1159 if self._cacheable is not None:
1150 return self._cacheable
1160 return self._cacheable
1151
1161
1152 # we don't know yet, assume it is for now
1162 # we don't know yet, assume it is for now
1153 return True
1163 return True
1154
1164
1155 def changed(self):
1165 def changed(self):
1156 # no point in going further if we can't cache it
1166 # no point in going further if we can't cache it
1157 if not self.cacheable():
1167 if not self.cacheable():
1158 return True
1168 return True
1159
1169
1160 newstat = filecachesubentry.stat(self.path)
1170 newstat = filecachesubentry.stat(self.path)
1161
1171
1162 # we may not know if it's cacheable yet, check again now
1172 # we may not know if it's cacheable yet, check again now
1163 if newstat and self._cacheable is None:
1173 if newstat and self._cacheable is None:
1164 self._cacheable = newstat.cacheable()
1174 self._cacheable = newstat.cacheable()
1165
1175
1166 # check again
1176 # check again
1167 if not self._cacheable:
1177 if not self._cacheable:
1168 return True
1178 return True
1169
1179
1170 if self.cachestat != newstat:
1180 if self.cachestat != newstat:
1171 self.cachestat = newstat
1181 self.cachestat = newstat
1172 return True
1182 return True
1173 else:
1183 else:
1174 return False
1184 return False
1175
1185
1176 @staticmethod
1186 @staticmethod
1177 def stat(path):
1187 def stat(path):
1178 try:
1188 try:
1179 return util.cachestat(path)
1189 return util.cachestat(path)
1180 except OSError as e:
1190 except OSError as e:
1181 if e.errno != errno.ENOENT:
1191 if e.errno != errno.ENOENT:
1182 raise
1192 raise
1183
1193
1184 class filecacheentry(object):
1194 class filecacheentry(object):
1185 def __init__(self, paths, stat=True):
1195 def __init__(self, paths, stat=True):
1186 self._entries = []
1196 self._entries = []
1187 for path in paths:
1197 for path in paths:
1188 self._entries.append(filecachesubentry(path, stat))
1198 self._entries.append(filecachesubentry(path, stat))
1189
1199
1190 def changed(self):
1200 def changed(self):
1191 '''true if any entry has changed'''
1201 '''true if any entry has changed'''
1192 for entry in self._entries:
1202 for entry in self._entries:
1193 if entry.changed():
1203 if entry.changed():
1194 return True
1204 return True
1195 return False
1205 return False
1196
1206
1197 def refresh(self):
1207 def refresh(self):
1198 for entry in self._entries:
1208 for entry in self._entries:
1199 entry.refresh()
1209 entry.refresh()
1200
1210
1201 class filecache(object):
1211 class filecache(object):
1202 """A property like decorator that tracks files under .hg/ for updates.
1212 """A property like decorator that tracks files under .hg/ for updates.
1203
1213
1204 On first access, the files defined as arguments are stat()ed and the
1214 On first access, the files defined as arguments are stat()ed and the
1205 results cached. The decorated function is called. The results are stashed
1215 results cached. The decorated function is called. The results are stashed
1206 away in a ``_filecache`` dict on the object whose method is decorated.
1216 away in a ``_filecache`` dict on the object whose method is decorated.
1207
1217
1208 On subsequent access, the cached result is returned.
1218 On subsequent access, the cached result is returned.
1209
1219
1210 On external property set operations, stat() calls are performed and the new
1220 On external property set operations, stat() calls are performed and the new
1211 value is cached.
1221 value is cached.
1212
1222
1213 On property delete operations, cached data is removed.
1223 On property delete operations, cached data is removed.
1214
1224
1215 When using the property API, cached data is always returned, if available:
1225 When using the property API, cached data is always returned, if available:
1216 no stat() is performed to check if the file has changed and if the function
1226 no stat() is performed to check if the file has changed and if the function
1217 needs to be called to reflect file changes.
1227 needs to be called to reflect file changes.
1218
1228
1219 Others can muck about with the state of the ``_filecache`` dict. e.g. they
1229 Others can muck about with the state of the ``_filecache`` dict. e.g. they
1220 can populate an entry before the property's getter is called. In this case,
1230 can populate an entry before the property's getter is called. In this case,
1221 entries in ``_filecache`` will be used during property operations,
1231 entries in ``_filecache`` will be used during property operations,
1222 if available. If the underlying file changes, it is up to external callers
1232 if available. If the underlying file changes, it is up to external callers
1223 to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
1233 to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
1224 method result as well as possibly calling ``del obj._filecache[attr]`` to
1234 method result as well as possibly calling ``del obj._filecache[attr]`` to
1225 remove the ``filecacheentry``.
1235 remove the ``filecacheentry``.
1226 """
1236 """
1227
1237
1228 def __init__(self, *paths):
1238 def __init__(self, *paths):
1229 self.paths = paths
1239 self.paths = paths
1230
1240
1231 def join(self, obj, fname):
1241 def join(self, obj, fname):
1232 """Used to compute the runtime path of a cached file.
1242 """Used to compute the runtime path of a cached file.
1233
1243
1234 Users should subclass filecache and provide their own version of this
1244 Users should subclass filecache and provide their own version of this
1235 function to call the appropriate join function on 'obj' (an instance
1245 function to call the appropriate join function on 'obj' (an instance
1236 of the class that its member function was decorated).
1246 of the class that its member function was decorated).
1237 """
1247 """
1238 raise NotImplementedError
1248 raise NotImplementedError
1239
1249
1240 def __call__(self, func):
1250 def __call__(self, func):
1241 self.func = func
1251 self.func = func
1242 self.sname = func.__name__
1252 self.sname = func.__name__
1243 self.name = pycompat.sysbytes(self.sname)
1253 self.name = pycompat.sysbytes(self.sname)
1244 return self
1254 return self
1245
1255
1246 def __get__(self, obj, type=None):
1256 def __get__(self, obj, type=None):
1247 # if accessed on the class, return the descriptor itself.
1257 # if accessed on the class, return the descriptor itself.
1248 if obj is None:
1258 if obj is None:
1249 return self
1259 return self
1250 # do we need to check if the file changed?
1260 # do we need to check if the file changed?
1251 if self.sname in obj.__dict__:
1261 if self.sname in obj.__dict__:
1252 assert self.name in obj._filecache, self.name
1262 assert self.name in obj._filecache, self.name
1253 return obj.__dict__[self.sname]
1263 return obj.__dict__[self.sname]
1254
1264
1255 entry = obj._filecache.get(self.name)
1265 entry = obj._filecache.get(self.name)
1256
1266
1257 if entry:
1267 if entry:
1258 if entry.changed():
1268 if entry.changed():
1259 entry.obj = self.func(obj)
1269 entry.obj = self.func(obj)
1260 else:
1270 else:
1261 paths = [self.join(obj, path) for path in self.paths]
1271 paths = [self.join(obj, path) for path in self.paths]
1262
1272
1263 # We stat -before- creating the object so our cache doesn't lie if
1273 # We stat -before- creating the object so our cache doesn't lie if
1264 # a writer modified between the time we read and stat
1274 # a writer modified between the time we read and stat
1265 entry = filecacheentry(paths, True)
1275 entry = filecacheentry(paths, True)
1266 entry.obj = self.func(obj)
1276 entry.obj = self.func(obj)
1267
1277
1268 obj._filecache[self.name] = entry
1278 obj._filecache[self.name] = entry
1269
1279
1270 obj.__dict__[self.sname] = entry.obj
1280 obj.__dict__[self.sname] = entry.obj
1271 return entry.obj
1281 return entry.obj
1272
1282
1273 def __set__(self, obj, value):
1283 def __set__(self, obj, value):
1274 if self.name not in obj._filecache:
1284 if self.name not in obj._filecache:
1275 # we add an entry for the missing value because X in __dict__
1285 # we add an entry for the missing value because X in __dict__
1276 # implies X in _filecache
1286 # implies X in _filecache
1277 paths = [self.join(obj, path) for path in self.paths]
1287 paths = [self.join(obj, path) for path in self.paths]
1278 ce = filecacheentry(paths, False)
1288 ce = filecacheentry(paths, False)
1279 obj._filecache[self.name] = ce
1289 obj._filecache[self.name] = ce
1280 else:
1290 else:
1281 ce = obj._filecache[self.name]
1291 ce = obj._filecache[self.name]
1282
1292
1283 ce.obj = value # update cached copy
1293 ce.obj = value # update cached copy
1284 obj.__dict__[self.sname] = value # update copy returned by obj.x
1294 obj.__dict__[self.sname] = value # update copy returned by obj.x
1285
1295
1286 def __delete__(self, obj):
1296 def __delete__(self, obj):
1287 try:
1297 try:
1288 del obj.__dict__[self.sname]
1298 del obj.__dict__[self.sname]
1289 except KeyError:
1299 except KeyError:
1290 raise AttributeError(self.sname)
1300 raise AttributeError(self.sname)
1291
1301
1292 def extdatasource(repo, source):
1302 def extdatasource(repo, source):
1293 """Gather a map of rev -> value dict from the specified source
1303 """Gather a map of rev -> value dict from the specified source
1294
1304
1295 A source spec is treated as a URL, with a special case shell: type
1305 A source spec is treated as a URL, with a special case shell: type
1296 for parsing the output from a shell command.
1306 for parsing the output from a shell command.
1297
1307
1298 The data is parsed as a series of newline-separated records where
1308 The data is parsed as a series of newline-separated records where
1299 each record is a revision specifier optionally followed by a space
1309 each record is a revision specifier optionally followed by a space
1300 and a freeform string value. If the revision is known locally, it
1310 and a freeform string value. If the revision is known locally, it
1301 is converted to a rev, otherwise the record is skipped.
1311 is converted to a rev, otherwise the record is skipped.
1302
1312
1303 Note that both key and value are treated as UTF-8 and converted to
1313 Note that both key and value are treated as UTF-8 and converted to
1304 the local encoding. This allows uniformity between local and
1314 the local encoding. This allows uniformity between local and
1305 remote data sources.
1315 remote data sources.
1306 """
1316 """
1307
1317
1308 spec = repo.ui.config("extdata", source)
1318 spec = repo.ui.config("extdata", source)
1309 if not spec:
1319 if not spec:
1310 raise error.Abort(_("unknown extdata source '%s'") % source)
1320 raise error.Abort(_("unknown extdata source '%s'") % source)
1311
1321
1312 data = {}
1322 data = {}
1313 src = proc = None
1323 src = proc = None
1314 try:
1324 try:
1315 if spec.startswith("shell:"):
1325 if spec.startswith("shell:"):
1316 # external commands should be run relative to the repo root
1326 # external commands should be run relative to the repo root
1317 cmd = spec[6:]
1327 cmd = spec[6:]
1318 proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
1328 proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
1319 close_fds=procutil.closefds,
1329 close_fds=procutil.closefds,
1320 stdout=subprocess.PIPE, cwd=repo.root)
1330 stdout=subprocess.PIPE, cwd=repo.root)
1321 src = proc.stdout
1331 src = proc.stdout
1322 else:
1332 else:
1323 # treat as a URL or file
1333 # treat as a URL or file
1324 src = url.open(repo.ui, spec)
1334 src = url.open(repo.ui, spec)
1325 for l in src:
1335 for l in src:
1326 if " " in l:
1336 if " " in l:
1327 k, v = l.strip().split(" ", 1)
1337 k, v = l.strip().split(" ", 1)
1328 else:
1338 else:
1329 k, v = l.strip(), ""
1339 k, v = l.strip(), ""
1330
1340
1331 k = encoding.tolocal(k)
1341 k = encoding.tolocal(k)
1332 try:
1342 try:
1333 data[revsingle(repo, k).rev()] = encoding.tolocal(v)
1343 data[revsingle(repo, k).rev()] = encoding.tolocal(v)
1334 except (error.LookupError, error.RepoLookupError):
1344 except (error.LookupError, error.RepoLookupError):
1335 pass # we ignore data for nodes that don't exist locally
1345 pass # we ignore data for nodes that don't exist locally
1336 finally:
1346 finally:
1337 if proc:
1347 if proc:
1338 proc.communicate()
1348 proc.communicate()
1339 if src:
1349 if src:
1340 src.close()
1350 src.close()
1341 if proc and proc.returncode != 0:
1351 if proc and proc.returncode != 0:
1342 raise error.Abort(_("extdata command '%s' failed: %s")
1352 raise error.Abort(_("extdata command '%s' failed: %s")
1343 % (cmd, procutil.explainexit(proc.returncode)))
1353 % (cmd, procutil.explainexit(proc.returncode)))
1344
1354
1345 return data
1355 return data
1346
1356
1347 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1357 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1348 if lock is None:
1358 if lock is None:
1349 raise error.LockInheritanceContractViolation(
1359 raise error.LockInheritanceContractViolation(
1350 'lock can only be inherited while held')
1360 'lock can only be inherited while held')
1351 if environ is None:
1361 if environ is None:
1352 environ = {}
1362 environ = {}
1353 with lock.inherit() as locker:
1363 with lock.inherit() as locker:
1354 environ[envvar] = locker
1364 environ[envvar] = locker
1355 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1365 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1356
1366
1357 def wlocksub(repo, cmd, *args, **kwargs):
1367 def wlocksub(repo, cmd, *args, **kwargs):
1358 """run cmd as a subprocess that allows inheriting repo's wlock
1368 """run cmd as a subprocess that allows inheriting repo's wlock
1359
1369
1360 This can only be called while the wlock is held. This takes all the
1370 This can only be called while the wlock is held. This takes all the
1361 arguments that ui.system does, and returns the exit code of the
1371 arguments that ui.system does, and returns the exit code of the
1362 subprocess."""
1372 subprocess."""
1363 return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
1373 return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
1364 **kwargs)
1374 **kwargs)
1365
1375
1366 class progress(object):
1376 class progress(object):
1367 def __init__(self, ui, topic, unit="", total=None):
1377 def __init__(self, ui, topic, unit="", total=None):
1368 self.ui = ui
1378 self.ui = ui
1369 self.pos = 0
1379 self.pos = 0
1370 self.topic = topic
1380 self.topic = topic
1371 self.unit = unit
1381 self.unit = unit
1372 self.total = total
1382 self.total = total
1373
1383
1374 def __enter__(self):
1384 def __enter__(self):
1375 return self
1385 return self
1376
1386
1377 def __exit__(self, exc_type, exc_value, exc_tb):
1387 def __exit__(self, exc_type, exc_value, exc_tb):
1378 self.complete()
1388 self.complete()
1379
1389
1380 def update(self, pos, item="", total=None):
1390 def update(self, pos, item="", total=None):
1381 assert pos is not None
1391 assert pos is not None
1382 if total:
1392 if total:
1383 self.total = total
1393 self.total = total
1384 self.pos = pos
1394 self.pos = pos
1385 self._print(item)
1395 self._print(item)
1386
1396
1387 def increment(self, step=1, item="", total=None):
1397 def increment(self, step=1, item="", total=None):
1388 self.update(self.pos + step, item, total)
1398 self.update(self.pos + step, item, total)
1389
1399
1390 def complete(self):
1400 def complete(self):
1391 self.ui.progress(self.topic, None)
1401 self.ui.progress(self.topic, None)
1392
1402
1393 def _print(self, item):
1403 def _print(self, item):
1394 self.ui.progress(self.topic, self.pos, item, self.unit,
1404 self.ui.progress(self.topic, self.pos, item, self.unit,
1395 self.total)
1405 self.total)
1396
1406
1397 def gdinitconfig(ui):
1407 def gdinitconfig(ui):
1398 """helper function to know if a repo should be created as general delta
1408 """helper function to know if a repo should be created as general delta
1399 """
1409 """
1400 # experimental config: format.generaldelta
1410 # experimental config: format.generaldelta
1401 return (ui.configbool('format', 'generaldelta')
1411 return (ui.configbool('format', 'generaldelta')
1402 or ui.configbool('format', 'usegeneraldelta')
1412 or ui.configbool('format', 'usegeneraldelta')
1403 or ui.configbool('format', 'sparse-revlog'))
1413 or ui.configbool('format', 'sparse-revlog'))
1404
1414
1405 def gddeltaconfig(ui):
1415 def gddeltaconfig(ui):
1406 """helper function to know if incoming delta should be optimised
1416 """helper function to know if incoming delta should be optimised
1407 """
1417 """
1408 # experimental config: format.generaldelta
1418 # experimental config: format.generaldelta
1409 return ui.configbool('format', 'generaldelta')
1419 return ui.configbool('format', 'generaldelta')
1410
1420
1411 class simplekeyvaluefile(object):
1421 class simplekeyvaluefile(object):
1412 """A simple file with key=value lines
1422 """A simple file with key=value lines
1413
1423
1414 Keys must be alphanumerics and start with a letter, values must not
1424 Keys must be alphanumerics and start with a letter, values must not
1415 contain '\n' characters"""
1425 contain '\n' characters"""
1416 firstlinekey = '__firstline'
1426 firstlinekey = '__firstline'
1417
1427
1418 def __init__(self, vfs, path, keys=None):
1428 def __init__(self, vfs, path, keys=None):
1419 self.vfs = vfs
1429 self.vfs = vfs
1420 self.path = path
1430 self.path = path
1421
1431
1422 def read(self, firstlinenonkeyval=False):
1432 def read(self, firstlinenonkeyval=False):
1423 """Read the contents of a simple key-value file
1433 """Read the contents of a simple key-value file
1424
1434
1425 'firstlinenonkeyval' indicates whether the first line of file should
1435 'firstlinenonkeyval' indicates whether the first line of file should
1426 be treated as a key-value pair or reuturned fully under the
1436 be treated as a key-value pair or reuturned fully under the
1427 __firstline key."""
1437 __firstline key."""
1428 lines = self.vfs.readlines(self.path)
1438 lines = self.vfs.readlines(self.path)
1429 d = {}
1439 d = {}
1430 if firstlinenonkeyval:
1440 if firstlinenonkeyval:
1431 if not lines:
1441 if not lines:
1432 e = _("empty simplekeyvalue file")
1442 e = _("empty simplekeyvalue file")
1433 raise error.CorruptedState(e)
1443 raise error.CorruptedState(e)
1434 # we don't want to include '\n' in the __firstline
1444 # we don't want to include '\n' in the __firstline
1435 d[self.firstlinekey] = lines[0][:-1]
1445 d[self.firstlinekey] = lines[0][:-1]
1436 del lines[0]
1446 del lines[0]
1437
1447
1438 try:
1448 try:
1439 # the 'if line.strip()' part prevents us from failing on empty
1449 # the 'if line.strip()' part prevents us from failing on empty
1440 # lines which only contain '\n' therefore are not skipped
1450 # lines which only contain '\n' therefore are not skipped
1441 # by 'if line'
1451 # by 'if line'
1442 updatedict = dict(line[:-1].split('=', 1) for line in lines
1452 updatedict = dict(line[:-1].split('=', 1) for line in lines
1443 if line.strip())
1453 if line.strip())
1444 if self.firstlinekey in updatedict:
1454 if self.firstlinekey in updatedict:
1445 e = _("%r can't be used as a key")
1455 e = _("%r can't be used as a key")
1446 raise error.CorruptedState(e % self.firstlinekey)
1456 raise error.CorruptedState(e % self.firstlinekey)
1447 d.update(updatedict)
1457 d.update(updatedict)
1448 except ValueError as e:
1458 except ValueError as e:
1449 raise error.CorruptedState(str(e))
1459 raise error.CorruptedState(str(e))
1450 return d
1460 return d
1451
1461
1452 def write(self, data, firstline=None):
1462 def write(self, data, firstline=None):
1453 """Write key=>value mapping to a file
1463 """Write key=>value mapping to a file
1454 data is a dict. Keys must be alphanumerical and start with a letter.
1464 data is a dict. Keys must be alphanumerical and start with a letter.
1455 Values must not contain newline characters.
1465 Values must not contain newline characters.
1456
1466
1457 If 'firstline' is not None, it is written to file before
1467 If 'firstline' is not None, it is written to file before
1458 everything else, as it is, not in a key=value form"""
1468 everything else, as it is, not in a key=value form"""
1459 lines = []
1469 lines = []
1460 if firstline is not None:
1470 if firstline is not None:
1461 lines.append('%s\n' % firstline)
1471 lines.append('%s\n' % firstline)
1462
1472
1463 for k, v in data.items():
1473 for k, v in data.items():
1464 if k == self.firstlinekey:
1474 if k == self.firstlinekey:
1465 e = "key name '%s' is reserved" % self.firstlinekey
1475 e = "key name '%s' is reserved" % self.firstlinekey
1466 raise error.ProgrammingError(e)
1476 raise error.ProgrammingError(e)
1467 if not k[0:1].isalpha():
1477 if not k[0:1].isalpha():
1468 e = "keys must start with a letter in a key-value file"
1478 e = "keys must start with a letter in a key-value file"
1469 raise error.ProgrammingError(e)
1479 raise error.ProgrammingError(e)
1470 if not k.isalnum():
1480 if not k.isalnum():
1471 e = "invalid key name in a simple key-value file"
1481 e = "invalid key name in a simple key-value file"
1472 raise error.ProgrammingError(e)
1482 raise error.ProgrammingError(e)
1473 if '\n' in v:
1483 if '\n' in v:
1474 e = "invalid value in a simple key-value file"
1484 e = "invalid value in a simple key-value file"
1475 raise error.ProgrammingError(e)
1485 raise error.ProgrammingError(e)
1476 lines.append("%s=%s\n" % (k, v))
1486 lines.append("%s=%s\n" % (k, v))
1477 with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
1487 with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
1478 fp.write(''.join(lines))
1488 fp.write(''.join(lines))
1479
1489
1480 _reportobsoletedsource = [
1490 _reportobsoletedsource = [
1481 'debugobsolete',
1491 'debugobsolete',
1482 'pull',
1492 'pull',
1483 'push',
1493 'push',
1484 'serve',
1494 'serve',
1485 'unbundle',
1495 'unbundle',
1486 ]
1496 ]
1487
1497
1488 _reportnewcssource = [
1498 _reportnewcssource = [
1489 'pull',
1499 'pull',
1490 'unbundle',
1500 'unbundle',
1491 ]
1501 ]
1492
1502
1493 def prefetchfiles(repo, revs, match):
1503 def prefetchfiles(repo, revs, match):
1494 """Invokes the registered file prefetch functions, allowing extensions to
1504 """Invokes the registered file prefetch functions, allowing extensions to
1495 ensure the corresponding files are available locally, before the command
1505 ensure the corresponding files are available locally, before the command
1496 uses them."""
1506 uses them."""
1497 if match:
1507 if match:
1498 # The command itself will complain about files that don't exist, so
1508 # The command itself will complain about files that don't exist, so
1499 # don't duplicate the message.
1509 # don't duplicate the message.
1500 match = matchmod.badmatch(match, lambda fn, msg: None)
1510 match = matchmod.badmatch(match, lambda fn, msg: None)
1501 else:
1511 else:
1502 match = matchall(repo)
1512 match = matchall(repo)
1503
1513
1504 fileprefetchhooks(repo, revs, match)
1514 fileprefetchhooks(repo, revs, match)
1505
1515
1506 # a list of (repo, revs, match) prefetch functions
1516 # a list of (repo, revs, match) prefetch functions
1507 fileprefetchhooks = util.hooks()
1517 fileprefetchhooks = util.hooks()
1508
1518
1509 # A marker that tells the evolve extension to suppress its own reporting
1519 # A marker that tells the evolve extension to suppress its own reporting
1510 _reportstroubledchangesets = True
1520 _reportstroubledchangesets = True
1511
1521
1512 def registersummarycallback(repo, otr, txnname=''):
1522 def registersummarycallback(repo, otr, txnname=''):
1513 """register a callback to issue a summary after the transaction is closed
1523 """register a callback to issue a summary after the transaction is closed
1514 """
1524 """
1515 def txmatch(sources):
1525 def txmatch(sources):
1516 return any(txnname.startswith(source) for source in sources)
1526 return any(txnname.startswith(source) for source in sources)
1517
1527
1518 categories = []
1528 categories = []
1519
1529
1520 def reportsummary(func):
1530 def reportsummary(func):
1521 """decorator for report callbacks."""
1531 """decorator for report callbacks."""
1522 # The repoview life cycle is shorter than the one of the actual
1532 # The repoview life cycle is shorter than the one of the actual
1523 # underlying repository. So the filtered object can die before the
1533 # underlying repository. So the filtered object can die before the
1524 # weakref is used leading to troubles. We keep a reference to the
1534 # weakref is used leading to troubles. We keep a reference to the
1525 # unfiltered object and restore the filtering when retrieving the
1535 # unfiltered object and restore the filtering when retrieving the
1526 # repository through the weakref.
1536 # repository through the weakref.
1527 filtername = repo.filtername
1537 filtername = repo.filtername
1528 reporef = weakref.ref(repo.unfiltered())
1538 reporef = weakref.ref(repo.unfiltered())
1529 def wrapped(tr):
1539 def wrapped(tr):
1530 repo = reporef()
1540 repo = reporef()
1531 if filtername:
1541 if filtername:
1532 repo = repo.filtered(filtername)
1542 repo = repo.filtered(filtername)
1533 func(repo, tr)
1543 func(repo, tr)
1534 newcat = '%02i-txnreport' % len(categories)
1544 newcat = '%02i-txnreport' % len(categories)
1535 otr.addpostclose(newcat, wrapped)
1545 otr.addpostclose(newcat, wrapped)
1536 categories.append(newcat)
1546 categories.append(newcat)
1537 return wrapped
1547 return wrapped
1538
1548
1539 if txmatch(_reportobsoletedsource):
1549 if txmatch(_reportobsoletedsource):
1540 @reportsummary
1550 @reportsummary
1541 def reportobsoleted(repo, tr):
1551 def reportobsoleted(repo, tr):
1542 obsoleted = obsutil.getobsoleted(repo, tr)
1552 obsoleted = obsutil.getobsoleted(repo, tr)
1543 if obsoleted:
1553 if obsoleted:
1544 repo.ui.status(_('obsoleted %i changesets\n')
1554 repo.ui.status(_('obsoleted %i changesets\n')
1545 % len(obsoleted))
1555 % len(obsoleted))
1546
1556
1547 if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
1557 if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
1548 repo.ui.configbool('experimental', 'evolution.report-instabilities')):
1558 repo.ui.configbool('experimental', 'evolution.report-instabilities')):
1549 instabilitytypes = [
1559 instabilitytypes = [
1550 ('orphan', 'orphan'),
1560 ('orphan', 'orphan'),
1551 ('phase-divergent', 'phasedivergent'),
1561 ('phase-divergent', 'phasedivergent'),
1552 ('content-divergent', 'contentdivergent'),
1562 ('content-divergent', 'contentdivergent'),
1553 ]
1563 ]
1554
1564
1555 def getinstabilitycounts(repo):
1565 def getinstabilitycounts(repo):
1556 filtered = repo.changelog.filteredrevs
1566 filtered = repo.changelog.filteredrevs
1557 counts = {}
1567 counts = {}
1558 for instability, revset in instabilitytypes:
1568 for instability, revset in instabilitytypes:
1559 counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
1569 counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
1560 filtered)
1570 filtered)
1561 return counts
1571 return counts
1562
1572
1563 oldinstabilitycounts = getinstabilitycounts(repo)
1573 oldinstabilitycounts = getinstabilitycounts(repo)
1564 @reportsummary
1574 @reportsummary
1565 def reportnewinstabilities(repo, tr):
1575 def reportnewinstabilities(repo, tr):
1566 newinstabilitycounts = getinstabilitycounts(repo)
1576 newinstabilitycounts = getinstabilitycounts(repo)
1567 for instability, revset in instabilitytypes:
1577 for instability, revset in instabilitytypes:
1568 delta = (newinstabilitycounts[instability] -
1578 delta = (newinstabilitycounts[instability] -
1569 oldinstabilitycounts[instability])
1579 oldinstabilitycounts[instability])
1570 msg = getinstabilitymessage(delta, instability)
1580 msg = getinstabilitymessage(delta, instability)
1571 if msg:
1581 if msg:
1572 repo.ui.warn(msg)
1582 repo.ui.warn(msg)
1573
1583
1574 if txmatch(_reportnewcssource):
1584 if txmatch(_reportnewcssource):
1575 @reportsummary
1585 @reportsummary
1576 def reportnewcs(repo, tr):
1586 def reportnewcs(repo, tr):
1577 """Report the range of new revisions pulled/unbundled."""
1587 """Report the range of new revisions pulled/unbundled."""
1578 newrevs = tr.changes.get('revs', pycompat.xrange(0, 0))
1588 newrevs = tr.changes.get('revs', pycompat.xrange(0, 0))
1579 if not newrevs:
1589 if not newrevs:
1580 return
1590 return
1581
1591
1582 # Compute the bounds of new revisions' range, excluding obsoletes.
1592 # Compute the bounds of new revisions' range, excluding obsoletes.
1583 unfi = repo.unfiltered()
1593 unfi = repo.unfiltered()
1584 revs = unfi.revs('%ld and not obsolete()', newrevs)
1594 revs = unfi.revs('%ld and not obsolete()', newrevs)
1585 if not revs:
1595 if not revs:
1586 # Got only obsoletes.
1596 # Got only obsoletes.
1587 return
1597 return
1588 minrev, maxrev = repo[revs.min()], repo[revs.max()]
1598 minrev, maxrev = repo[revs.min()], repo[revs.max()]
1589
1599
1590 if minrev == maxrev:
1600 if minrev == maxrev:
1591 revrange = minrev
1601 revrange = minrev
1592 else:
1602 else:
1593 revrange = '%s:%s' % (minrev, maxrev)
1603 revrange = '%s:%s' % (minrev, maxrev)
1594 repo.ui.status(_('new changesets %s\n') % revrange)
1604 repo.ui.status(_('new changesets %s\n') % revrange)
1595
1605
1596 @reportsummary
1606 @reportsummary
1597 def reportphasechanges(repo, tr):
1607 def reportphasechanges(repo, tr):
1598 """Report statistics of phase changes for changesets pre-existing
1608 """Report statistics of phase changes for changesets pre-existing
1599 pull/unbundle.
1609 pull/unbundle.
1600 """
1610 """
1601 newrevs = tr.changes.get('revs', pycompat.xrange(0, 0))
1611 newrevs = tr.changes.get('revs', pycompat.xrange(0, 0))
1602 phasetracking = tr.changes.get('phases', {})
1612 phasetracking = tr.changes.get('phases', {})
1603 if not phasetracking:
1613 if not phasetracking:
1604 return
1614 return
1605 published = [
1615 published = [
1606 rev for rev, (old, new) in phasetracking.iteritems()
1616 rev for rev, (old, new) in phasetracking.iteritems()
1607 if new == phases.public and rev not in newrevs
1617 if new == phases.public and rev not in newrevs
1608 ]
1618 ]
1609 if not published:
1619 if not published:
1610 return
1620 return
1611 repo.ui.status(_('%d local changesets published\n')
1621 repo.ui.status(_('%d local changesets published\n')
1612 % len(published))
1622 % len(published))
1613
1623
1614 def getinstabilitymessage(delta, instability):
1624 def getinstabilitymessage(delta, instability):
1615 """function to return the message to show warning about new instabilities
1625 """function to return the message to show warning about new instabilities
1616
1626
1617 exists as a separate function so that extension can wrap to show more
1627 exists as a separate function so that extension can wrap to show more
1618 information like how to fix instabilities"""
1628 information like how to fix instabilities"""
1619 if delta > 0:
1629 if delta > 0:
1620 return _('%i new %s changesets\n') % (delta, instability)
1630 return _('%i new %s changesets\n') % (delta, instability)
1621
1631
1622 def nodesummaries(repo, nodes, maxnumnodes=4):
1632 def nodesummaries(repo, nodes, maxnumnodes=4):
1623 if len(nodes) <= maxnumnodes or repo.ui.verbose:
1633 if len(nodes) <= maxnumnodes or repo.ui.verbose:
1624 return ' '.join(short(h) for h in nodes)
1634 return ' '.join(short(h) for h in nodes)
1625 first = ' '.join(short(h) for h in nodes[:maxnumnodes])
1635 first = ' '.join(short(h) for h in nodes[:maxnumnodes])
1626 return _("%s and %d others") % (first, len(nodes) - maxnumnodes)
1636 return _("%s and %d others") % (first, len(nodes) - maxnumnodes)
1627
1637
1628 def enforcesinglehead(repo, tr, desc):
1638 def enforcesinglehead(repo, tr, desc):
1629 """check that no named branch has multiple heads"""
1639 """check that no named branch has multiple heads"""
1630 if desc in ('strip', 'repair'):
1640 if desc in ('strip', 'repair'):
1631 # skip the logic during strip
1641 # skip the logic during strip
1632 return
1642 return
1633 visible = repo.filtered('visible')
1643 visible = repo.filtered('visible')
1634 # possible improvement: we could restrict the check to affected branch
1644 # possible improvement: we could restrict the check to affected branch
1635 for name, heads in visible.branchmap().iteritems():
1645 for name, heads in visible.branchmap().iteritems():
1636 if len(heads) > 1:
1646 if len(heads) > 1:
1637 msg = _('rejecting multiple heads on branch "%s"')
1647 msg = _('rejecting multiple heads on branch "%s"')
1638 msg %= name
1648 msg %= name
1639 hint = _('%d heads: %s')
1649 hint = _('%d heads: %s')
1640 hint %= (len(heads), nodesummaries(repo, heads))
1650 hint %= (len(heads), nodesummaries(repo, heads))
1641 raise error.Abort(msg, hint=hint)
1651 raise error.Abort(msg, hint=hint)
1642
1652
1643 def wrapconvertsink(sink):
1653 def wrapconvertsink(sink):
1644 """Allow extensions to wrap the sink returned by convcmd.convertsink()
1654 """Allow extensions to wrap the sink returned by convcmd.convertsink()
1645 before it is used, whether or not the convert extension was formally loaded.
1655 before it is used, whether or not the convert extension was formally loaded.
1646 """
1656 """
1647 return sink
1657 return sink
1648
1658
1649 def unhidehashlikerevs(repo, specs, hiddentype):
1659 def unhidehashlikerevs(repo, specs, hiddentype):
1650 """parse the user specs and unhide changesets whose hash or revision number
1660 """parse the user specs and unhide changesets whose hash or revision number
1651 is passed.
1661 is passed.
1652
1662
1653 hiddentype can be: 1) 'warn': warn while unhiding changesets
1663 hiddentype can be: 1) 'warn': warn while unhiding changesets
1654 2) 'nowarn': don't warn while unhiding changesets
1664 2) 'nowarn': don't warn while unhiding changesets
1655
1665
1656 returns a repo object with the required changesets unhidden
1666 returns a repo object with the required changesets unhidden
1657 """
1667 """
1658 if not repo.filtername or not repo.ui.configbool('experimental',
1668 if not repo.filtername or not repo.ui.configbool('experimental',
1659 'directaccess'):
1669 'directaccess'):
1660 return repo
1670 return repo
1661
1671
1662 if repo.filtername not in ('visible', 'visible-hidden'):
1672 if repo.filtername not in ('visible', 'visible-hidden'):
1663 return repo
1673 return repo
1664
1674
1665 symbols = set()
1675 symbols = set()
1666 for spec in specs:
1676 for spec in specs:
1667 try:
1677 try:
1668 tree = revsetlang.parse(spec)
1678 tree = revsetlang.parse(spec)
1669 except error.ParseError: # will be reported by scmutil.revrange()
1679 except error.ParseError: # will be reported by scmutil.revrange()
1670 continue
1680 continue
1671
1681
1672 symbols.update(revsetlang.gethashlikesymbols(tree))
1682 symbols.update(revsetlang.gethashlikesymbols(tree))
1673
1683
1674 if not symbols:
1684 if not symbols:
1675 return repo
1685 return repo
1676
1686
1677 revs = _getrevsfromsymbols(repo, symbols)
1687 revs = _getrevsfromsymbols(repo, symbols)
1678
1688
1679 if not revs:
1689 if not revs:
1680 return repo
1690 return repo
1681
1691
1682 if hiddentype == 'warn':
1692 if hiddentype == 'warn':
1683 unfi = repo.unfiltered()
1693 unfi = repo.unfiltered()
1684 revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
1694 revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
1685 repo.ui.warn(_("warning: accessing hidden changesets for write "
1695 repo.ui.warn(_("warning: accessing hidden changesets for write "
1686 "operation: %s\n") % revstr)
1696 "operation: %s\n") % revstr)
1687
1697
1688 # we have to use new filtername to separate branch/tags cache until we can
1698 # we have to use new filtername to separate branch/tags cache until we can
1689 # disbale these cache when revisions are dynamically pinned.
1699 # disbale these cache when revisions are dynamically pinned.
1690 return repo.filtered('visible-hidden', revs)
1700 return repo.filtered('visible-hidden', revs)
1691
1701
1692 def _getrevsfromsymbols(repo, symbols):
1702 def _getrevsfromsymbols(repo, symbols):
1693 """parse the list of symbols and returns a set of revision numbers of hidden
1703 """parse the list of symbols and returns a set of revision numbers of hidden
1694 changesets present in symbols"""
1704 changesets present in symbols"""
1695 revs = set()
1705 revs = set()
1696 unfi = repo.unfiltered()
1706 unfi = repo.unfiltered()
1697 unficl = unfi.changelog
1707 unficl = unfi.changelog
1698 cl = repo.changelog
1708 cl = repo.changelog
1699 tiprev = len(unficl)
1709 tiprev = len(unficl)
1700 allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
1710 allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
1701 for s in symbols:
1711 for s in symbols:
1702 try:
1712 try:
1703 n = int(s)
1713 n = int(s)
1704 if n <= tiprev:
1714 if n <= tiprev:
1705 if not allowrevnums:
1715 if not allowrevnums:
1706 continue
1716 continue
1707 else:
1717 else:
1708 if n not in cl:
1718 if n not in cl:
1709 revs.add(n)
1719 revs.add(n)
1710 continue
1720 continue
1711 except ValueError:
1721 except ValueError:
1712 pass
1722 pass
1713
1723
1714 try:
1724 try:
1715 s = resolvehexnodeidprefix(unfi, s)
1725 s = resolvehexnodeidprefix(unfi, s)
1716 except (error.LookupError, error.WdirUnsupported):
1726 except (error.LookupError, error.WdirUnsupported):
1717 s = None
1727 s = None
1718
1728
1719 if s is not None:
1729 if s is not None:
1720 rev = unficl.rev(s)
1730 rev = unficl.rev(s)
1721 if rev not in cl:
1731 if rev not in cl:
1722 revs.add(rev)
1732 revs.add(rev)
1723
1733
1724 return revs
1734 return revs
1725
1735
1726 def bookmarkrevs(repo, mark):
1736 def bookmarkrevs(repo, mark):
1727 """
1737 """
1728 Select revisions reachable by a given bookmark
1738 Select revisions reachable by a given bookmark
1729 """
1739 """
1730 return repo.revs("ancestors(bookmark(%s)) - "
1740 return repo.revs("ancestors(bookmark(%s)) - "
1731 "ancestors(head() and not bookmark(%s)) - "
1741 "ancestors(head() and not bookmark(%s)) - "
1732 "ancestors(bookmark() and not bookmark(%s))",
1742 "ancestors(bookmark() and not bookmark(%s))",
1733 mark, mark, mark)
1743 mark, mark, mark)
@@ -1,717 +1,718 b''
1 # templatefuncs.py - common template functions
1 # templatefuncs.py - common template functions
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import re
10 import re
11
11
12 from .i18n import _
12 from .i18n import _
13 from .node import (
13 from .node import (
14 bin,
14 bin,
15 wdirid,
15 wdirid,
16 )
16 )
17 from . import (
17 from . import (
18 color,
18 color,
19 encoding,
19 encoding,
20 error,
20 error,
21 minirst,
21 minirst,
22 obsutil,
22 obsutil,
23 registrar,
23 registrar,
24 revset as revsetmod,
24 revset as revsetmod,
25 revsetlang,
25 revsetlang,
26 scmutil,
26 scmutil,
27 templatefilters,
27 templatefilters,
28 templatekw,
28 templatekw,
29 templateutil,
29 templateutil,
30 util,
30 util,
31 )
31 )
32 from .utils import (
32 from .utils import (
33 dateutil,
33 dateutil,
34 stringutil,
34 stringutil,
35 )
35 )
36
36
37 evalrawexp = templateutil.evalrawexp
37 evalrawexp = templateutil.evalrawexp
38 evalwrapped = templateutil.evalwrapped
38 evalwrapped = templateutil.evalwrapped
39 evalfuncarg = templateutil.evalfuncarg
39 evalfuncarg = templateutil.evalfuncarg
40 evalboolean = templateutil.evalboolean
40 evalboolean = templateutil.evalboolean
41 evaldate = templateutil.evaldate
41 evaldate = templateutil.evaldate
42 evalinteger = templateutil.evalinteger
42 evalinteger = templateutil.evalinteger
43 evalstring = templateutil.evalstring
43 evalstring = templateutil.evalstring
44 evalstringliteral = templateutil.evalstringliteral
44 evalstringliteral = templateutil.evalstringliteral
45
45
46 # dict of template built-in functions
46 # dict of template built-in functions
47 funcs = {}
47 funcs = {}
48 templatefunc = registrar.templatefunc(funcs)
48 templatefunc = registrar.templatefunc(funcs)
49
49
50 @templatefunc('date(date[, fmt])')
50 @templatefunc('date(date[, fmt])')
51 def date(context, mapping, args):
51 def date(context, mapping, args):
52 """Format a date. See :hg:`help dates` for formatting
52 """Format a date. See :hg:`help dates` for formatting
53 strings. The default is a Unix date format, including the timezone:
53 strings. The default is a Unix date format, including the timezone:
54 "Mon Sep 04 15:13:13 2006 0700"."""
54 "Mon Sep 04 15:13:13 2006 0700"."""
55 if not (1 <= len(args) <= 2):
55 if not (1 <= len(args) <= 2):
56 # i18n: "date" is a keyword
56 # i18n: "date" is a keyword
57 raise error.ParseError(_("date expects one or two arguments"))
57 raise error.ParseError(_("date expects one or two arguments"))
58
58
59 date = evaldate(context, mapping, args[0],
59 date = evaldate(context, mapping, args[0],
60 # i18n: "date" is a keyword
60 # i18n: "date" is a keyword
61 _("date expects a date information"))
61 _("date expects a date information"))
62 fmt = None
62 fmt = None
63 if len(args) == 2:
63 if len(args) == 2:
64 fmt = evalstring(context, mapping, args[1])
64 fmt = evalstring(context, mapping, args[1])
65 if fmt is None:
65 if fmt is None:
66 return dateutil.datestr(date)
66 return dateutil.datestr(date)
67 else:
67 else:
68 return dateutil.datestr(date, fmt)
68 return dateutil.datestr(date, fmt)
69
69
70 @templatefunc('dict([[key=]value...])', argspec='*args **kwargs')
70 @templatefunc('dict([[key=]value...])', argspec='*args **kwargs')
71 def dict_(context, mapping, args):
71 def dict_(context, mapping, args):
72 """Construct a dict from key-value pairs. A key may be omitted if
72 """Construct a dict from key-value pairs. A key may be omitted if
73 a value expression can provide an unambiguous name."""
73 a value expression can provide an unambiguous name."""
74 data = util.sortdict()
74 data = util.sortdict()
75
75
76 for v in args['args']:
76 for v in args['args']:
77 k = templateutil.findsymbolicname(v)
77 k = templateutil.findsymbolicname(v)
78 if not k:
78 if not k:
79 raise error.ParseError(_('dict key cannot be inferred'))
79 raise error.ParseError(_('dict key cannot be inferred'))
80 if k in data or k in args['kwargs']:
80 if k in data or k in args['kwargs']:
81 raise error.ParseError(_("duplicated dict key '%s' inferred") % k)
81 raise error.ParseError(_("duplicated dict key '%s' inferred") % k)
82 data[k] = evalfuncarg(context, mapping, v)
82 data[k] = evalfuncarg(context, mapping, v)
83
83
84 data.update((k, evalfuncarg(context, mapping, v))
84 data.update((k, evalfuncarg(context, mapping, v))
85 for k, v in args['kwargs'].iteritems())
85 for k, v in args['kwargs'].iteritems())
86 return templateutil.hybriddict(data)
86 return templateutil.hybriddict(data)
87
87
88 @templatefunc('diff([includepattern [, excludepattern]])', requires={'ctx'})
88 @templatefunc('diff([includepattern [, excludepattern]])', requires={'ctx'})
89 def diff(context, mapping, args):
89 def diff(context, mapping, args):
90 """Show a diff, optionally
90 """Show a diff, optionally
91 specifying files to include or exclude."""
91 specifying files to include or exclude."""
92 if len(args) > 2:
92 if len(args) > 2:
93 # i18n: "diff" is a keyword
93 # i18n: "diff" is a keyword
94 raise error.ParseError(_("diff expects zero, one, or two arguments"))
94 raise error.ParseError(_("diff expects zero, one, or two arguments"))
95
95
96 def getpatterns(i):
96 def getpatterns(i):
97 if i < len(args):
97 if i < len(args):
98 s = evalstring(context, mapping, args[i]).strip()
98 s = evalstring(context, mapping, args[i]).strip()
99 if s:
99 if s:
100 return [s]
100 return [s]
101 return []
101 return []
102
102
103 ctx = context.resource(mapping, 'ctx')
103 ctx = context.resource(mapping, 'ctx')
104 chunks = ctx.diff(match=ctx.match([], getpatterns(0), getpatterns(1)))
104 chunks = ctx.diff(match=ctx.match([], getpatterns(0), getpatterns(1)))
105
105
106 return ''.join(chunks)
106 return ''.join(chunks)
107
107
108 @templatefunc('extdata(source)', argspec='source', requires={'ctx', 'cache'})
108 @templatefunc('extdata(source)', argspec='source', requires={'ctx', 'cache'})
109 def extdata(context, mapping, args):
109 def extdata(context, mapping, args):
110 """Show a text read from the specified extdata source. (EXPERIMENTAL)"""
110 """Show a text read from the specified extdata source. (EXPERIMENTAL)"""
111 if 'source' not in args:
111 if 'source' not in args:
112 # i18n: "extdata" is a keyword
112 # i18n: "extdata" is a keyword
113 raise error.ParseError(_('extdata expects one argument'))
113 raise error.ParseError(_('extdata expects one argument'))
114
114
115 source = evalstring(context, mapping, args['source'])
115 source = evalstring(context, mapping, args['source'])
116 if not source:
116 if not source:
117 sym = templateutil.findsymbolicname(args['source'])
117 sym = templateutil.findsymbolicname(args['source'])
118 if sym:
118 if sym:
119 raise error.ParseError(_('empty data source specified'),
119 raise error.ParseError(_('empty data source specified'),
120 hint=_("did you mean extdata('%s')?") % sym)
120 hint=_("did you mean extdata('%s')?") % sym)
121 else:
121 else:
122 raise error.ParseError(_('empty data source specified'))
122 raise error.ParseError(_('empty data source specified'))
123 cache = context.resource(mapping, 'cache').setdefault('extdata', {})
123 cache = context.resource(mapping, 'cache').setdefault('extdata', {})
124 ctx = context.resource(mapping, 'ctx')
124 ctx = context.resource(mapping, 'ctx')
125 if source in cache:
125 if source in cache:
126 data = cache[source]
126 data = cache[source]
127 else:
127 else:
128 data = cache[source] = scmutil.extdatasource(ctx.repo(), source)
128 data = cache[source] = scmutil.extdatasource(ctx.repo(), source)
129 return data.get(ctx.rev(), '')
129 return data.get(ctx.rev(), '')
130
130
131 @templatefunc('files(pattern)', requires={'ctx'})
131 @templatefunc('files(pattern)', requires={'ctx'})
132 def files(context, mapping, args):
132 def files(context, mapping, args):
133 """All files of the current changeset matching the pattern. See
133 """All files of the current changeset matching the pattern. See
134 :hg:`help patterns`."""
134 :hg:`help patterns`."""
135 if not len(args) == 1:
135 if not len(args) == 1:
136 # i18n: "files" is a keyword
136 # i18n: "files" is a keyword
137 raise error.ParseError(_("files expects one argument"))
137 raise error.ParseError(_("files expects one argument"))
138
138
139 raw = evalstring(context, mapping, args[0])
139 raw = evalstring(context, mapping, args[0])
140 ctx = context.resource(mapping, 'ctx')
140 ctx = context.resource(mapping, 'ctx')
141 m = ctx.match([raw])
141 m = ctx.match([raw])
142 files = list(ctx.matches(m))
142 files = list(ctx.matches(m))
143 return templateutil.compatlist(context, mapping, "file", files)
143 return templateutil.compatlist(context, mapping, "file", files)
144
144
145 @templatefunc('fill(text[, width[, initialident[, hangindent]]])')
145 @templatefunc('fill(text[, width[, initialident[, hangindent]]])')
146 def fill(context, mapping, args):
146 def fill(context, mapping, args):
147 """Fill many
147 """Fill many
148 paragraphs with optional indentation. See the "fill" filter."""
148 paragraphs with optional indentation. See the "fill" filter."""
149 if not (1 <= len(args) <= 4):
149 if not (1 <= len(args) <= 4):
150 # i18n: "fill" is a keyword
150 # i18n: "fill" is a keyword
151 raise error.ParseError(_("fill expects one to four arguments"))
151 raise error.ParseError(_("fill expects one to four arguments"))
152
152
153 text = evalstring(context, mapping, args[0])
153 text = evalstring(context, mapping, args[0])
154 width = 76
154 width = 76
155 initindent = ''
155 initindent = ''
156 hangindent = ''
156 hangindent = ''
157 if 2 <= len(args) <= 4:
157 if 2 <= len(args) <= 4:
158 width = evalinteger(context, mapping, args[1],
158 width = evalinteger(context, mapping, args[1],
159 # i18n: "fill" is a keyword
159 # i18n: "fill" is a keyword
160 _("fill expects an integer width"))
160 _("fill expects an integer width"))
161 try:
161 try:
162 initindent = evalstring(context, mapping, args[2])
162 initindent = evalstring(context, mapping, args[2])
163 hangindent = evalstring(context, mapping, args[3])
163 hangindent = evalstring(context, mapping, args[3])
164 except IndexError:
164 except IndexError:
165 pass
165 pass
166
166
167 return templatefilters.fill(text, width, initindent, hangindent)
167 return templatefilters.fill(text, width, initindent, hangindent)
168
168
169 @templatefunc('filter(iterable[, expr])')
169 @templatefunc('filter(iterable[, expr])')
170 def filter_(context, mapping, args):
170 def filter_(context, mapping, args):
171 """Remove empty elements from a list or a dict. If expr specified, it's
171 """Remove empty elements from a list or a dict. If expr specified, it's
172 applied to each element to test emptiness."""
172 applied to each element to test emptiness."""
173 if not (1 <= len(args) <= 2):
173 if not (1 <= len(args) <= 2):
174 # i18n: "filter" is a keyword
174 # i18n: "filter" is a keyword
175 raise error.ParseError(_("filter expects one or two arguments"))
175 raise error.ParseError(_("filter expects one or two arguments"))
176 iterable = evalwrapped(context, mapping, args[0])
176 iterable = evalwrapped(context, mapping, args[0])
177 if len(args) == 1:
177 if len(args) == 1:
178 def select(w):
178 def select(w):
179 return w.tobool(context, mapping)
179 return w.tobool(context, mapping)
180 else:
180 else:
181 def select(w):
181 def select(w):
182 if not isinstance(w, templateutil.mappable):
182 if not isinstance(w, templateutil.mappable):
183 raise error.ParseError(_("not filterable by expression"))
183 raise error.ParseError(_("not filterable by expression"))
184 lm = context.overlaymap(mapping, w.tomap(context))
184 lm = context.overlaymap(mapping, w.tomap(context))
185 return evalboolean(context, lm, args[1])
185 return evalboolean(context, lm, args[1])
186 return iterable.filter(context, mapping, select)
186 return iterable.filter(context, mapping, select)
187
187
188 @templatefunc('formatnode(node)', requires={'ui'})
188 @templatefunc('formatnode(node)', requires={'ui'})
189 def formatnode(context, mapping, args):
189 def formatnode(context, mapping, args):
190 """Obtain the preferred form of a changeset hash. (DEPRECATED)"""
190 """Obtain the preferred form of a changeset hash. (DEPRECATED)"""
191 if len(args) != 1:
191 if len(args) != 1:
192 # i18n: "formatnode" is a keyword
192 # i18n: "formatnode" is a keyword
193 raise error.ParseError(_("formatnode expects one argument"))
193 raise error.ParseError(_("formatnode expects one argument"))
194
194
195 ui = context.resource(mapping, 'ui')
195 ui = context.resource(mapping, 'ui')
196 node = evalstring(context, mapping, args[0])
196 node = evalstring(context, mapping, args[0])
197 if ui.debugflag:
197 if ui.debugflag:
198 return node
198 return node
199 return templatefilters.short(node)
199 return templatefilters.short(node)
200
200
@templatefunc('mailmap(author)', requires={'repo', 'cache'})
def mailmap(context, mapping, args):
    """Return the author, updated according to the value
    set in the .mailmap file"""
    if len(args) != 1:
        raise error.ParseError(_("mailmap expects one argument"))

    author = evalstring(context, mapping, args[0])

    repo = context.resource(mapping, 'repo')
    cache = context.resource(mapping, 'cache')

    # parse .mailmap at most once per template run; the parsed table is
    # kept in the per-run resource cache
    if 'mailmap' not in cache:
        cache['mailmap'] = stringutil.parsemailmap(
            repo.wvfs.tryread('.mailmap'))

    return stringutil.mapname(cache['mailmap'], author)
218
218
@templatefunc('pad(text, width[, fillchar=\' \'[, left=False]])',
              argspec='text width fillchar left')
def pad(context, mapping, args):
    """Pad text with a
    fill character."""
    if 'text' not in args or 'width' not in args:
        # i18n: "pad" is a keyword
        raise error.ParseError(_("pad() expects two to four arguments"))

    width = evalinteger(context, mapping, args['width'],
                        # i18n: "pad" is a keyword
                        _("pad() expects an integer width"))

    text = evalstring(context, mapping, args['text'])

    fillchar = ' '
    if 'fillchar' in args:
        fillchar = evalstring(context, mapping, args['fillchar'])
        # the fill character must render as exactly one character once
        # any color/effect escape sequences are stripped
        if len(color.stripeffects(fillchar)) != 1:
            # i18n: "pad" is a keyword
            raise error.ParseError(_("pad() expects a single fill character"))
    left = 'left' in args and evalboolean(context, mapping, args['left'])

    # width is measured in display columns, ignoring color effects
    nfill = width - encoding.colwidth(color.stripeffects(text))
    if nfill <= 0:
        return text
    filler = fillchar * nfill
    if left:
        return filler + text
    return text + filler
251
251
@templatefunc('indent(text, indentchars[, firstline])')
def indent(context, mapping, args):
    """Indents all non-empty lines
    with the characters given in the indentchars string. An optional
    third parameter will override the indent for the first line only
    if present."""
    if not (2 <= len(args) <= 3):
        # i18n: "indent" is a keyword
        raise error.ParseError(_("indent() expects two or three arguments"))

    text = evalstring(context, mapping, args[0])
    indent = evalstring(context, mapping, args[1])
    if len(args) == 3:
        firstline = evalstring(context, mapping, args[2])
    else:
        firstline = indent

    # the indent function doesn't indent the first line, so we do it here
    return templatefilters.indent(firstline + text, indent)
272
272
@templatefunc('get(dict, key)')
def get(context, mapping, args):
    """Get an attribute/key from an object. Some keywords
    are complex types. This function allows you to obtain the value of an
    attribute on these types."""
    if len(args) != 2:
        # i18n: "get" is a keyword
        raise error.ParseError(_("get() expects two arguments"))

    container = evalwrapped(context, mapping, args[0])
    key = evalrawexp(context, mapping, args[1])
    try:
        return container.getmember(context, mapping, key)
    except error.ParseError as err:
        # re-raise with a hint pointing at the likely user mistake
        # i18n: "get" is a keyword
        hint = _("get() expects a dict as first argument")
        raise error.ParseError(bytes(err), hint=hint)
290
290
@templatefunc('if(expr, then[, else])')
def if_(context, mapping, args):
    """Conditionally execute based on the result of
    an expression."""
    if not (2 <= len(args) <= 3):
        # i18n: "if" is a keyword
        raise error.ParseError(_("if expects two or three arguments"))

    if evalboolean(context, mapping, args[0]):
        return evalrawexp(context, mapping, args[1])
    if len(args) == 3:
        return evalrawexp(context, mapping, args[2])
    # no else branch given: implicitly resolves to empty output
304
304
@templatefunc('ifcontains(needle, haystack, then[, else])')
def ifcontains(context, mapping, args):
    """Conditionally execute based
    on whether the item "needle" is in "haystack"."""
    if not (3 <= len(args) <= 4):
        # i18n: "ifcontains" is a keyword
        raise error.ParseError(_("ifcontains expects three or four arguments"))

    haystack = evalwrapped(context, mapping, args[1])
    try:
        needle = evalrawexp(context, mapping, args[0])
        matched = haystack.contains(context, mapping, needle)
    except error.ParseError:
        # an unsupported containment test simply counts as "not found"
        matched = False

    if matched:
        return evalrawexp(context, mapping, args[2])
    if len(args) == 4:
        return evalrawexp(context, mapping, args[3])
324
324
@templatefunc('ifeq(expr1, expr2, then[, else])')
def ifeq(context, mapping, args):
    """Conditionally execute based on
    whether 2 items are equivalent."""
    if not (3 <= len(args) <= 4):
        # i18n: "ifeq" is a keyword
        raise error.ParseError(_("ifeq expects three or four arguments"))

    # both operands are compared by their string forms
    lhs = evalstring(context, mapping, args[0])
    rhs = evalstring(context, mapping, args[1])
    if lhs == rhs:
        return evalrawexp(context, mapping, args[2])
    if len(args) == 4:
        return evalrawexp(context, mapping, args[3])
339
339
@templatefunc('join(list, sep)')
def join(context, mapping, args):
    """Join items in a list with a delimiter."""
    if not (1 <= len(args) <= 2):
        # i18n: "join" is a keyword
        raise error.ParseError(_("join expects one or two arguments"))

    joinset = evalwrapped(context, mapping, args[0])
    if len(args) > 1:
        sep = evalstring(context, mapping, args[1])
    else:
        sep = " "
    return joinset.join(context, mapping, sep)
352
352
@templatefunc('label(label, expr)', requires={'ui'})
def label(context, mapping, args):
    """Apply a label to generated content. Content with
    a label applied can result in additional post-processing, such as
    automatic colorization."""
    if len(args) != 2:
        # i18n: "label" is a keyword
        raise error.ParseError(_("label expects two arguments"))

    ui = context.resource(mapping, 'ui')
    # evaluate the content first (matches original evaluation order)
    content = evalstring(context, mapping, args[1])
    # preserve unknown symbol as literal so effects like 'red', 'bold',
    # etc. don't need to be quoted
    effects = evalstringliteral(context, mapping, args[0])

    return ui.label(content, effects)
369
369
@templatefunc('latesttag([pattern])')
def latesttag(context, mapping, args):
    """The global tags matching the given pattern on the
    most recent globally tagged ancestor of this changeset.
    If no such tags exist, the "{tag}" template resolves to
    the string "null". See :hg:`help revisions.patterns` for the pattern
    syntax.
    """
    if len(args) > 1:
        # i18n: "latesttag" is a keyword
        raise error.ParseError(_("latesttag expects at most one argument"))

    if args:
        pattern = evalstring(context, mapping, args[0])
    else:
        pattern = None
    return templatekw.showlatesttags(context, mapping, pattern)
386
386
@templatefunc('localdate(date[, tz])')
def localdate(context, mapping, args):
    """Converts a date to the specified timezone.
    The default is local date."""
    if not (1 <= len(args) <= 2):
        # i18n: "localdate" is a keyword
        raise error.ParseError(_("localdate expects one or two arguments"))

    date = evaldate(context, mapping, args[0],
                    # i18n: "localdate" is a keyword
                    _("localdate expects a date information"))
    if len(args) < 2:
        # no timezone argument: use the local offset
        tzoffset = dateutil.makedate()[1]
    else:
        tzoffset = None
        tz = evalfuncarg(context, mapping, args[1])
        if isinstance(tz, bytes):
            # accept timezone names/strings such as "+0200"; a trailing
            # remainder means the string wasn't a pure timezone
            tzoffset, remainder = dateutil.parsetimezone(tz)
            if remainder:
                tzoffset = None
        if tzoffset is None:
            # fall back to interpreting the value as an integer offset
            try:
                tzoffset = int(tz)
            except (TypeError, ValueError):
                # i18n: "localdate" is a keyword
                raise error.ParseError(_("localdate expects a timezone"))
    return templateutil.date((date[0], tzoffset))
414
414
@templatefunc('max(iterable)')
def max_(context, mapping, args, **kwargs):
    """Return the max of an iterable"""
    if len(args) != 1:
        # i18n: "max" is a keyword
        raise error.ParseError(_("max expects one argument"))

    items = evalwrapped(context, mapping, args[0])
    try:
        return items.getmax(context, mapping)
    except error.ParseError as err:
        # re-raise with a hint about the expected argument type
        # i18n: "max" is a keyword
        hint = _("max first argument should be an iterable")
        raise error.ParseError(bytes(err), hint=hint)
429
429
@templatefunc('min(iterable)')
def min_(context, mapping, args, **kwargs):
    """Return the min of an iterable"""
    if len(args) != 1:
        # i18n: "min" is a keyword
        raise error.ParseError(_("min expects one argument"))

    items = evalwrapped(context, mapping, args[0])
    try:
        return items.getmin(context, mapping)
    except error.ParseError as err:
        # re-raise with a hint about the expected argument type
        # i18n: "min" is a keyword
        hint = _("min first argument should be an iterable")
        raise error.ParseError(bytes(err), hint=hint)
444
444
@templatefunc('mod(a, b)')
def mod(context, mapping, args):
    """Calculate a mod b such that a / b + a mod b == a"""
    # idiom fix: "if not len(args) == 2" -> "if len(args) != 2"
    if len(args) != 2:
        # i18n: "mod" is a keyword
        raise error.ParseError(_("mod expects two arguments"))

    func = lambda a, b: a % b
    return templateutil.runarithmetic(context, mapping,
                                      (func, args[0], args[1]))
455
455
@templatefunc('obsfateoperations(markers)')
def obsfateoperations(context, mapping, args):
    """Compute obsfate related information based on markers (EXPERIMENTAL)"""
    if len(args) != 1:
        # i18n: "obsfateoperations" is a keyword
        raise error.ParseError(_("obsfateoperations expects one argument"))

    markers = evalfuncarg(context, mapping, args[0])

    try:
        operations = obsutil.markersoperations(markers)
    except (TypeError, KeyError):
        # i18n: "obsfateoperations" is a keyword
        errmsg = _("obsfateoperations first argument should be an iterable")
        raise error.ParseError(errmsg)
    return templateutil.hybridlist(operations, name='operation')
472
472
@templatefunc('obsfatedate(markers)')
def obsfatedate(context, mapping, args):
    """Compute obsfate related information based on markers (EXPERIMENTAL)"""
    if len(args) != 1:
        # i18n: "obsfatedate" is a keyword
        raise error.ParseError(_("obsfatedate expects one argument"))

    markers = evalfuncarg(context, mapping, args[0])

    try:
        # TODO: maybe this has to be a wrapped list of date wrappers?
        dates = obsutil.markersdates(markers)
    except (TypeError, KeyError):
        # i18n: "obsfatedate" is a keyword
        errmsg = _("obsfatedate first argument should be an iterable")
        raise error.ParseError(errmsg)
    return templateutil.hybridlist(dates, name='date', fmt='%d %d')
490
490
@templatefunc('obsfateusers(markers)')
def obsfateusers(context, mapping, args):
    """Compute obsfate related information based on markers (EXPERIMENTAL)"""
    if len(args) != 1:
        # i18n: "obsfateusers" is a keyword
        raise error.ParseError(_("obsfateusers expects one argument"))

    markers = evalfuncarg(context, mapping, args[0])

    try:
        data = obsutil.markersusers(markers)
        return templateutil.hybridlist(data, name='user')
    except (TypeError, KeyError, ValueError):
        # i18n: "obsfateusers" is a keyword
        # typo fix: "obsmakers" -> "obsmarkers" in the user-facing message
        msg = _("obsfateusers first argument should be an iterable of "
                "obsmarkers")
        raise error.ParseError(msg)
508
508
@templatefunc('obsfateverb(successors, markers)')
def obsfateverb(context, mapping, args):
    """Compute obsfate related information based on successors (EXPERIMENTAL)"""
    if len(args) != 2:
        # i18n: "obsfateverb" is a keyword
        raise error.ParseError(_("obsfateverb expects two arguments"))

    successors = evalfuncarg(context, mapping, args[0])
    markers = evalfuncarg(context, mapping, args[1])

    try:
        return obsutil.obsfateverb(successors, markers)
    except TypeError:
        # i18n: "obsfateverb" is a keyword
        errmsg = _("obsfateverb first argument should be countable")
        raise error.ParseError(errmsg)
525
525
@templatefunc('relpath(path)', requires={'repo'})
def relpath(context, mapping, args):
    """Convert a repository-absolute path into a filesystem path relative to
    the current working directory."""
    if len(args) != 1:
        # i18n: "relpath" is a keyword
        raise error.ParseError(_("relpath expects one argument"))

    path = evalstring(context, mapping, args[0])
    repo = context.resource(mapping, 'repo')
    return repo.pathto(path)
537
537
@templatefunc('revset(query[, formatargs...])', requires={'repo', 'cache'})
def revset(context, mapping, args):
    """Execute a revision set query. See
    :hg:`help revset`."""
    # idiom fix: "if not len(args) > 0" -> "if not args"
    if not args:
        # i18n: "revset" is a keyword
        raise error.ParseError(_("revset expects one or more arguments"))

    raw = evalstring(context, mapping, args[0])
    repo = context.resource(mapping, 'repo')

    def query(expr):
        m = revsetmod.match(repo.ui, expr, lookup=revsetmod.lookupfn(repo))
        return m(repo)

    if len(args) > 1:
        # the query has per-call format arguments, so its result cannot be
        # shared across calls
        formatargs = [evalfuncarg(context, mapping, a) for a in args[1:]]
        revs = query(revsetlang.formatspec(raw, *formatargs))
        revs = list(revs)
    else:
        # a constant query is cached per template run, keyed by its text
        cache = context.resource(mapping, 'cache')
        revsetcache = cache.setdefault("revsetcache", {})
        if raw in revsetcache:
            revs = revsetcache[raw]
        else:
            revs = query(raw)
            revs = list(revs)
            revsetcache[raw] = revs
    return templatekw.showrevslist(context, mapping, "revision", revs)
567
567
@templatefunc('rstdoc(text, style)')
def rstdoc(context, mapping, args):
    """Format reStructuredText."""
    if len(args) != 2:
        # i18n: "rstdoc" is a keyword
        raise error.ParseError(_("rstdoc expects two arguments"))

    text = evalstring(context, mapping, args[0])
    style = evalstring(context, mapping, args[1])

    # minirst.format returns (formatted, pruned); keep only the text
    return minirst.format(text, style=style, keep=['verbose'])[0]
579
579
@templatefunc('separate(sep, args...)', argspec='sep *args')
def separate(context, mapping, args):
    """Add a separator between non-empty arguments."""
    if 'sep' not in args:
        # i18n: "separate" is a keyword
        raise error.ParseError(_("separate expects at least one argument"))

    sep = evalstring(context, mapping, args['sep'])
    needsep = False
    for arg in args['args']:
        chunk = evalstring(context, mapping, arg)
        # empty chunks are dropped entirely (no separator emitted)
        if not chunk:
            continue
        if needsep:
            yield sep
        needsep = True
        yield chunk
598
598
@templatefunc('shortest(node, minlength=4)', requires={'repo', 'cache'})
def shortest(context, mapping, args):
    """Obtain the shortest representation of
    a node."""
    if not (1 <= len(args) <= 2):
        # i18n: "shortest" is a keyword
        raise error.ParseError(_("shortest() expects one or two arguments"))

    hexnode = evalstring(context, mapping, args[0])

    minlength = 4
    if len(args) > 1:
        minlength = evalinteger(context, mapping, args[1],
                                # i18n: "shortest" is a keyword
                                _("shortest() expects an integer minlength"))

    repo = context.resource(mapping, 'repo')
    if len(hexnode) > 40:
        # longer than a full hash: cannot be a node id, pass through
        return hexnode
    elif len(hexnode) == 40:
        try:
            node = bin(hexnode)
        except TypeError:
            # not valid hex: pass through unchanged
            return hexnode
    else:
        # a prefix: resolve it to a unique node if possible
        try:
            node = scmutil.resolvehexnodeidprefix(repo, hexnode)
        except error.WdirUnsupported:
            node = wdirid
        except error.LookupError:
            return hexnode
        if not node:
            return hexnode
    # the per-run cache lets repeated calls reuse disambiguation data
    cache = context.resource(mapping, 'cache')
    try:
        return scmutil.shortesthexnodeidprefix(repo, node, minlength, cache)
    except error.RepoLookupError:
        return hexnode
636
637
@templatefunc('strip(text[, chars])')
def strip(context, mapping, args):
    """Strip characters from a string. By default,
    strips all leading and trailing whitespace."""
    if not (1 <= len(args) <= 2):
        # i18n: "strip" is a keyword
        raise error.ParseError(_("strip expects one or two arguments"))

    text = evalstring(context, mapping, args[0])
    if len(args) == 1:
        return text.strip()
    chars = evalstring(context, mapping, args[1])
    return text.strip(chars)
650
651
@templatefunc('sub(pattern, replacement, expression)')
def sub(context, mapping, args):
    """Perform text substitution
    using regular expressions."""
    if len(args) != 3:
        # i18n: "sub" is a keyword
        raise error.ParseError(_("sub expects three arguments"))

    pat = evalstring(context, mapping, args[0])
    rpl = evalstring(context, mapping, args[1])
    src = evalstring(context, mapping, args[2])
    # compile eagerly so a bad pattern is reported up front
    try:
        compiled = re.compile(pat)
    except re.error:
        # i18n: "sub" is a keyword
        raise error.ParseError(_("sub got an invalid pattern: %s") % pat)
    # a bad replacement (e.g. dangling backreference) only surfaces here
    try:
        yield compiled.sub(rpl, src)
    except re.error:
        # i18n: "sub" is a keyword
        raise error.ParseError(_("sub got an invalid replacement: %s") % rpl)
672
673
@templatefunc('startswith(pattern, text)')
def startswith(context, mapping, args):
    """Returns the value from the "text" argument
    if it begins with the content from the "pattern" argument."""
    if len(args) != 2:
        # i18n: "startswith" is a keyword
        raise error.ParseError(_("startswith expects two arguments"))

    prefix = evalstring(context, mapping, args[0])
    text = evalstring(context, mapping, args[1])
    # pass the text through unchanged on a match, empty string otherwise
    if not text.startswith(prefix):
        return ''
    return text
@templatefunc('word(number, text[, separator])')
def word(context, mapping, args):
    """Return the nth word from a string."""
    if not (2 <= len(args) <= 3):
        # i18n: "word" is a keyword
        raise error.ParseError(_("word expects two or three arguments, got %d")
                               % len(args))

    num = evalinteger(context, mapping, args[0],
                      # i18n: "word" is a keyword
                      _("word expects an integer index"))
    text = evalstring(context, mapping, args[1])
    # default separator None splits on runs of whitespace
    splitter = None
    if len(args) == 3:
        splitter = evalstring(context, mapping, args[2])

    tokens = text.split(splitter)
    # accept any index valid for python sequence indexing (incl. negatives)
    if -len(tokens) <= num < len(tokens):
        return tokens[num]
    return ''
def loadfunction(ui, extname, registrarobj):
    """Load template function from specified registrarobj
    """
    # copy every registered function into the module-level table
    table = registrarobj._table
    for funcname in table:
        funcs[funcname] = table[funcname]
# tell hggettext to extract docstrings from these functions:
# NOTE(review): presumably the docstrings feed user-visible template help,
# hence the translation extraction -- confirm against hggettext usage
i18nfunctions = funcs.values()
General Comments 0
You need to be logged in to leave comments. Login now