@@ -1,800 +1,804 @@ hgweb/webutil.py
|
1 | 1 | # hgweb/webutil.py - utility library for the web interface. |
|
2 | 2 | # |
|
3 | 3 | # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net> |
|
4 | 4 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
5 | 5 | # |
|
6 | 6 | # This software may be used and distributed according to the terms of the |
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | from __future__ import absolute_import |
|
10 | 10 | |
|
11 | 11 | import copy |
|
12 | 12 | import difflib |
|
13 | 13 | import os |
|
14 | 14 | import re |
|
15 | 15 | |
|
16 | 16 | from ..i18n import _ |
|
17 | 17 | from ..node import hex, nullid, short |
|
18 | 18 | |
|
19 | 19 | from .common import ( |
|
20 | 20 | ErrorResponse, |
|
21 | 21 | HTTP_BAD_REQUEST, |
|
22 | 22 | HTTP_NOT_FOUND, |
|
23 | 23 | paritygen, |
|
24 | 24 | ) |
|
25 | 25 | |
|
26 | 26 | from .. import ( |
|
27 | 27 | context, |
|
28 | 28 | error, |
|
29 | 29 | match, |
|
30 | 30 | mdiff, |
|
31 | 31 | obsutil, |
|
32 | 32 | patch, |
|
33 | 33 | pathutil, |
|
34 | 34 | pycompat, |
|
35 | 35 | scmutil, |
|
36 | 36 | templatefilters, |
|
37 | 37 | templatekw, |
|
38 | 38 | templateutil, |
|
39 | 39 | ui as uimod, |
|
40 | 40 | util, |
|
41 | 41 | ) |
|
42 | 42 | |
|
43 | 43 | from ..utils import ( |
|
44 | 44 | stringutil, |
|
45 | 45 | ) |
|
46 | 46 | |
|
47 | 47 | archivespecs = util.sortdict(( |
|
48 | 48 | ('zip', ('application/zip', 'zip', '.zip', None)), |
|
49 | 49 | ('gz', ('application/x-gzip', 'tgz', '.tar.gz', None)), |
|
50 | 50 | ('bz2', ('application/x-bzip2', 'tbz2', '.tar.bz2', None)), |
|
51 | 51 | )) |
|
52 | 52 | |
|
53 | 53 | def archivelist(ui, nodeid, url=None): |
|
54 | 54 | allowed = ui.configlist('web', 'allow-archive', untrusted=True) |
|
55 | 55 | archives = [] |
|
56 | 56 | |
|
57 | 57 | for typ, spec in archivespecs.iteritems(): |
|
58 | 58 | if typ in allowed or ui.configbool('web', 'allow' + typ, |
|
59 | 59 | untrusted=True): |
|
60 | 60 | archives.append({ |
|
61 | 61 | 'type': typ, |
|
62 | 62 | 'extension': spec[2], |
|
63 | 63 | 'node': nodeid, |
|
64 | 64 | 'url': url, |
|
65 | 65 | }) |
|
66 | 66 | |
|
67 | 67 | return templateutil.mappinglist(archives) |
|
68 | 68 | |
|
69 | 69 | def up(p): |
|
70 | 70 | if p[0:1] != "/": |
|
71 | 71 | p = "/" + p |
|
72 | 72 | if p[-1:] == "/": |
|
73 | 73 | p = p[:-1] |
|
74 | 74 | up = os.path.dirname(p) |
|
75 | 75 | if up == "/": |
|
76 | 76 | return "/" |
|
77 | 77 | return up + "/" |
|
78 | 78 | |
|
79 | 79 | def _navseq(step, firststep=None): |
|
80 | 80 | if firststep: |
|
81 | 81 | yield firststep |
|
82 | 82 | if firststep >= 20 and firststep <= 40: |
|
83 | 83 | firststep = 50 |
|
84 | 84 | yield firststep |
|
85 | 85 | assert step > 0 |
|
86 | 86 | assert firststep > 0 |
|
87 | 87 | while step <= firststep: |
|
88 | 88 | step *= 10 |
|
89 | 89 | while True: |
|
90 | 90 | yield 1 * step |
|
91 | 91 | yield 3 * step |
|
92 | 92 | step *= 10 |
|
93 | 93 | |
|
94 | 94 | class revnav(object): |
|
95 | 95 | |
|
96 | 96 | def __init__(self, repo): |
|
97 | 97 | """Navigation generation object |
|
98 | 98 | |
|
99 | 99 | :repo: repo object we generate nav for |
|
100 | 100 | """ |
|
101 | 101 | # used for hex generation |
|
102 | 102 | self._revlog = repo.changelog |
|
103 | 103 | |
|
104 | 104 | def __nonzero__(self): |
|
105 | 105 | """return True if any revision to navigate over""" |
|
106 | 106 | return self._first() is not None |
|
107 | 107 | |
|
108 | 108 | __bool__ = __nonzero__ |
|
109 | 109 | |
|
110 | 110 | def _first(self): |
|
111 | 111 | """return the minimum non-filtered changeset or None""" |
|
112 | 112 | try: |
|
113 | 113 | return next(iter(self._revlog)) |
|
114 | 114 | except StopIteration: |
|
115 | 115 | return None |
|
116 | 116 | |
|
117 | 117 | def hex(self, rev): |
|
118 | 118 | return hex(self._revlog.node(rev)) |
|
119 | 119 | |
|
120 | 120 | def gen(self, pos, pagelen, limit): |
|
121 | 121 | """computes label and revision id for navigation link |
|
122 | 122 | |
|
123 | 123 | :pos: is the revision relative to which we generate navigation. |
|
124 | 124 | :pagelen: the size of each navigation page |
|
125 | 125 | :limit: how far shall we link |
|
126 | 126 | |
|
127 | 127 | The return is: |
|
128 | 128 | - a single element mappinglist |
|
129 | 129 | - containing a dictionary with a `before` and `after` key |
|
130 | 130 | - values are dictionaries with `label` and `node` keys |
|
131 | 131 | """ |
|
132 | 132 | if not self: |
|
133 | 133 | # empty repo |
|
134 | 134 | return templateutil.mappinglist([ |
|
135 | 135 | {'before': templateutil.mappinglist([]), |
|
136 | 136 | 'after': templateutil.mappinglist([])}, |
|
137 | 137 | ]) |
|
138 | 138 | |
|
139 | 139 | targets = [] |
|
140 | 140 | for f in _navseq(1, pagelen): |
|
141 | 141 | if f > limit: |
|
142 | 142 | break |
|
143 | 143 | targets.append(pos + f) |
|
144 | 144 | targets.append(pos - f) |
|
145 | 145 | targets.sort() |
|
146 | 146 | |
|
147 | 147 | first = self._first() |
|
148 | 148 | navbefore = [{'label': '(%i)' % first, 'node': self.hex(first)}] |
|
149 | 149 | navafter = [] |
|
150 | 150 | for rev in targets: |
|
151 | 151 | if rev not in self._revlog: |
|
152 | 152 | continue |
|
153 | 153 | if pos < rev < limit: |
|
154 | 154 | navafter.append({'label': '+%d' % abs(rev - pos), |
|
155 | 155 | 'node': self.hex(rev)}) |
|
156 | 156 | if 0 < rev < pos: |
|
157 | 157 | navbefore.append({'label': '-%d' % abs(rev - pos), |
|
158 | 158 | 'node': self.hex(rev)}) |
|
159 | 159 | |
|
160 | 160 | navafter.append({'label': 'tip', 'node': 'tip'}) |
|
161 | 161 | |
|
162 | 162 | # TODO: maybe this can be a scalar object supporting tomap() |
|
163 | 163 | return templateutil.mappinglist([ |
|
164 | 164 | {'before': templateutil.mappinglist(navbefore), |
|
165 | 165 | 'after': templateutil.mappinglist(navafter)}, |
|
166 | 166 | ]) |
|
167 | 167 | |
|
168 | 168 | class filerevnav(revnav): |
|
169 | 169 | |
|
170 | 170 | def __init__(self, repo, path): |
|
171 | 171 | """Navigation generation object |
|
172 | 172 | |
|
173 | 173 | :repo: repo object we generate nav for |
|
174 | 174 | :path: path of the file we generate nav for |
|
175 | 175 | """ |
|
176 | 176 | # used for iteration |
|
177 | 177 | self._changelog = repo.unfiltered().changelog |
|
178 | 178 | # used for hex generation |
|
179 | 179 | self._revlog = repo.file(path) |
|
180 | 180 | |
|
181 | 181 | def hex(self, rev): |
|
182 | 182 | return hex(self._changelog.node(self._revlog.linkrev(rev))) |
|
183 | 183 | |
|
184 | 184 | # TODO: maybe this can be a wrapper class for changectx/filectx list, which |
|
185 | 185 | # yields {'ctx': ctx} |
|
186 | 186 | def _ctxsgen(context, ctxs): |
|
187 | 187 | for s in ctxs: |
|
188 | 188 | d = { |
|
189 | 189 | 'node': s.hex(), |
|
190 | 190 | 'rev': s.rev(), |
|
191 | 191 | 'user': s.user(), |
|
192 | 192 | 'date': s.date(), |
|
193 | 193 | 'description': s.description(), |
|
194 | 194 | 'branch': s.branch(), |
|
195 | 195 | } |
|
196 | 196 | if util.safehasattr(s, 'path'): |
|
197 | 197 | d['file'] = s.path() |
|
198 | 198 | yield d |
|
199 | 199 | |
|
200 | 200 | def _siblings(siblings=None, hiderev=None): |
|
201 | 201 | if siblings is None: |
|
202 | 202 | siblings = [] |
|
203 | 203 | siblings = [s for s in siblings if s.node() != nullid] |
|
204 | 204 | if len(siblings) == 1 and siblings[0].rev() == hiderev: |
|
205 | 205 | siblings = [] |
|
206 | 206 | return templateutil.mappinggenerator(_ctxsgen, args=(siblings,)) |
|
207 | 207 | |
|
208 | 208 | def difffeatureopts(req, ui, section): |
|
209 | 209 | diffopts = patch.difffeatureopts(ui, untrusted=True, |
|
210 | 210 | section=section, whitespace=True) |
|
211 | 211 | |
|
212 | 212 | for k in ('ignorews', 'ignorewsamount', 'ignorewseol', 'ignoreblanklines'): |
|
213 | 213 | v = req.qsparams.get(k) |
|
214 | 214 | if v is not None: |
|
215 | 215 | v = stringutil.parsebool(v) |
|
216 | 216 | setattr(diffopts, k, v if v is not None else True) |
|
217 | 217 | |
|
218 | 218 | return diffopts |
|
219 | 219 | |
|
220 | 220 | def annotate(req, fctx, ui): |
|
221 | 221 | diffopts = difffeatureopts(req, ui, 'annotate') |
|
222 | 222 | return fctx.annotate(follow=True, diffopts=diffopts) |
|
223 | 223 | |
|
224 | 224 | def parents(ctx, hide=None): |
|
225 | 225 | if isinstance(ctx, context.basefilectx): |
|
226 | 226 | introrev = ctx.introrev() |
|
227 | 227 | if ctx.changectx().rev() != introrev: |
|
228 | 228 | return _siblings([ctx.repo()[introrev]], hide) |
|
229 | 229 | return _siblings(ctx.parents(), hide) |
|
230 | 230 | |
|
231 | 231 | def children(ctx, hide=None): |
|
232 | 232 | return _siblings(ctx.children(), hide) |
|
233 | 233 | |
|
234 | 234 | def renamelink(fctx): |
|
235 | 235 | r = fctx.renamed() |
|
236 | 236 | if r: |
|
237 | 237 | return templateutil.mappinglist([{'file': r[0], 'node': hex(r[1])}]) |
|
238 | 238 | return templateutil.mappinglist([]) |
|
239 | 239 | |
|
240 | 240 | def nodetagsdict(repo, node): |
|
241 | 241 | return templateutil.hybridlist(repo.nodetags(node), name='name') |
|
242 | 242 | |
|
243 | 243 | def nodebookmarksdict(repo, node): |
|
244 | 244 | return templateutil.hybridlist(repo.nodebookmarks(node), name='name') |
|
245 | 245 | |
|
246 | 246 | def nodebranchdict(repo, ctx): |
|
247 | 247 | branches = [] |
|
248 | 248 | branch = ctx.branch() |
|
249 | 249 | # If this is an empty repo, ctx.node() == nullid, |
|
250 | 250 | # ctx.branch() == 'default'. |
|
251 | 251 | try: |
|
252 | 252 | branchnode = repo.branchtip(branch) |
|
253 | 253 | except error.RepoLookupError: |
|
254 | 254 | branchnode = None |
|
255 | 255 | if branchnode == ctx.node(): |
|
256 | 256 | branches.append(branch) |
|
257 | 257 | return templateutil.hybridlist(branches, name='name') |
|
258 | 258 | |
|
259 | 259 | def nodeinbranch(repo, ctx): |
|
260 | 260 | branches = [] |
|
261 | 261 | branch = ctx.branch() |
|
262 | 262 | try: |
|
263 | 263 | branchnode = repo.branchtip(branch) |
|
264 | 264 | except error.RepoLookupError: |
|
265 | 265 | branchnode = None |
|
266 | 266 | if branch != 'default' and branchnode != ctx.node(): |
|
267 | 267 | branches.append(branch) |
|
268 | 268 | return templateutil.hybridlist(branches, name='name') |
|
269 | 269 | |
|
270 | 270 | def nodebranchnodefault(ctx): |
|
271 | 271 | branches = [] |
|
272 | 272 | branch = ctx.branch() |
|
273 | 273 | if branch != 'default': |
|
274 | 274 | branches.append(branch) |
|
275 | 275 | return templateutil.hybridlist(branches, name='name') |
|
276 | 276 | |
|
277 | 277 | def _nodenamesgen(context, f, node, name): |
|
278 | 278 | for t in f(node): |
|
279 | 279 | yield {name: t} |
|
280 | 280 | |
|
281 | 281 | def showtag(repo, t1, node=nullid): |
|
282 | 282 | args = (repo.nodetags, node, 'tag') |
|
283 | 283 | return templateutil.mappinggenerator(_nodenamesgen, args=args, name=t1) |
|
284 | 284 | |
|
285 | 285 | def showbookmark(repo, t1, node=nullid): |
|
286 | 286 | args = (repo.nodebookmarks, node, 'bookmark') |
|
287 | 287 | return templateutil.mappinggenerator(_nodenamesgen, args=args, name=t1) |
|
288 | 288 | |
|
289 | 289 | def branchentries(repo, stripecount, limit=0): |
|
290 | 290 | tips = [] |
|
291 | 291 | heads = repo.heads() |
|
292 | 292 | parity = paritygen(stripecount) |
|
293 | 293 | sortkey = lambda item: (not item[1], item[0].rev()) |
|
294 | 294 | |
|
295 | 295 | def entries(context): |
|
296 | 296 | count = 0 |
|
297 | 297 | if not tips: |
|
298 | 298 | for tag, hs, tip, closed in repo.branchmap().iterbranches(): |
|
299 | 299 | tips.append((repo[tip], closed)) |
|
300 | 300 | for ctx, closed in sorted(tips, key=sortkey, reverse=True): |
|
301 | 301 | if limit > 0 and count >= limit: |
|
302 | 302 | return |
|
303 | 303 | count += 1 |
|
304 | 304 | if closed: |
|
305 | 305 | status = 'closed' |
|
306 | 306 | elif ctx.node() not in heads: |
|
307 | 307 | status = 'inactive' |
|
308 | 308 | else: |
|
309 | 309 | status = 'open' |
|
310 | 310 | yield { |
|
311 | 311 | 'parity': next(parity), |
|
312 | 312 | 'branch': ctx.branch(), |
|
313 | 313 | 'status': status, |
|
314 | 314 | 'node': ctx.hex(), |
|
315 | 315 | 'date': ctx.date() |
|
316 | 316 | } |
|
317 | 317 | |
|
318 | 318 | return templateutil.mappinggenerator(entries) |
|
319 | 319 | |
|
320 | 320 | def cleanpath(repo, path): |
|
321 | 321 | path = path.lstrip('/') |
|
322 | 322 | return pathutil.canonpath(repo.root, '', path) |
|
323 | 323 | |
|
324 | 324 | def changectx(repo, req): |
|
325 | 325 | changeid = "tip" |
|
326 | 326 | if 'node' in req.qsparams: |
|
327 | 327 | changeid = req.qsparams['node'] |
|
328 | 328 | ipos = changeid.find(':') |
|
329 | 329 | if ipos != -1: |
|
330 | 330 | changeid = changeid[(ipos + 1):] |
|
331 | 331 | |
|
332 | 332 | return scmutil.revsymbol(repo, changeid) |
|
333 | 333 | |
|
334 | 334 | def basechangectx(repo, req): |
|
335 | 335 | if 'node' in req.qsparams: |
|
336 | 336 | changeid = req.qsparams['node'] |
|
337 | 337 | ipos = changeid.find(':') |
|
338 | 338 | if ipos != -1: |
|
339 | 339 | changeid = changeid[:ipos] |
|
340 | 340 | return scmutil.revsymbol(repo, changeid) |
|
341 | 341 | |
|
342 | 342 | return None |
|
343 | 343 | |
|
344 | 344 | def filectx(repo, req): |
|
345 | 345 | if 'file' not in req.qsparams: |
|
346 | 346 | raise ErrorResponse(HTTP_NOT_FOUND, 'file not given') |
|
347 | 347 | path = cleanpath(repo, req.qsparams['file']) |
|
348 | 348 | if 'node' in req.qsparams: |
|
349 | 349 | changeid = req.qsparams['node'] |
|
350 | 350 | elif 'filenode' in req.qsparams: |
|
351 | 351 | changeid = req.qsparams['filenode'] |
|
352 | 352 | else: |
|
353 | 353 | raise ErrorResponse(HTTP_NOT_FOUND, 'node or filenode not given') |
|
354 | 354 | try: |
|
355 | 355 | fctx = scmutil.revsymbol(repo, changeid)[path] |
|
356 | 356 | except error.RepoError: |
|
357 | 357 | fctx = repo.filectx(path, fileid=changeid) |
|
358 | 358 | |
|
359 | 359 | return fctx |
|
360 | 360 | |
|
361 | 361 | def linerange(req): |
|
362 | 362 | linerange = req.qsparams.getall('linerange') |
|
363 | 363 | if not linerange: |
|
364 | 364 | return None |
|
365 | 365 | if len(linerange) > 1: |
|
366 | 366 | raise ErrorResponse(HTTP_BAD_REQUEST, |
|
367 | 367 | 'redundant linerange parameter') |
|
368 | 368 | try: |
|
369 | 369 | fromline, toline = map(int, linerange[0].split(':', 1)) |
|
370 | 370 | except ValueError: |
|
371 | 371 | raise ErrorResponse(HTTP_BAD_REQUEST, |
|
372 | 372 | 'invalid linerange parameter') |
|
373 | 373 | try: |
|
374 | 374 | return util.processlinerange(fromline, toline) |
|
375 | 375 | except error.ParseError as exc: |
|
376 | 376 | raise ErrorResponse(HTTP_BAD_REQUEST, pycompat.bytestr(exc)) |
|
377 | 377 | |
|
378 | 378 | def formatlinerange(fromline, toline): |
|
379 | 379 | return '%d:%d' % (fromline + 1, toline) |
|
380 | 380 | |
|
381 | 381 | def _succsandmarkersgen(context, mapping): |
|
382 | 382 | repo = context.resource(mapping, 'repo') |
|
383 | 383 | itemmappings = templatekw.showsuccsandmarkers(context, mapping) |
|
384 | 384 | for item in itemmappings.tovalue(context, mapping): |
|
385 | 385 | item['successors'] = _siblings(repo[successor] |
|
386 | 386 | for successor in item['successors']) |
|
387 | 387 | yield item |
|
388 | 388 | |
|
389 | 389 | def succsandmarkers(context, mapping): |
|
390 | 390 | return templateutil.mappinggenerator(_succsandmarkersgen, args=(mapping,)) |
|
391 | 391 | |
|
392 | 392 | # teach templater succsandmarkers is switched to (context, mapping) API |
|
393 | 393 | succsandmarkers._requires = {'repo', 'ctx'} |
|
394 | 394 | |
|
395 | 395 | def _whyunstablegen(context, mapping): |
|
396 | 396 | repo = context.resource(mapping, 'repo') |
|
397 | 397 | ctx = context.resource(mapping, 'ctx') |
|
398 | 398 | |
|
399 | 399 | entries = obsutil.whyunstable(repo, ctx) |
|
400 | 400 | for entry in entries: |
|
401 | 401 | if entry.get('divergentnodes'): |
|
402 | 402 | entry['divergentnodes'] = _siblings(entry['divergentnodes']) |
|
403 | 403 | yield entry |
|
404 | 404 | |
|
405 | 405 | def whyunstable(context, mapping): |
|
406 | 406 | return templateutil.mappinggenerator(_whyunstablegen, args=(mapping,)) |
|
407 | 407 | |
|
408 | 408 | whyunstable._requires = {'repo', 'ctx'} |
|
409 | 409 | |
|
410 | 410 | def commonentry(repo, ctx): |
|
411 | 411 | node = ctx.node() |
|
412 | 412 | return { |
|
413 | 413 | # TODO: perhaps ctx.changectx() should be assigned if ctx is a |
|
414 | 414 | # filectx, but I'm not pretty sure if that would always work because |
|
415 | 415 | # fctx.parents() != fctx.changectx.parents() for example. |
|
416 | 416 | 'ctx': ctx, |
|
417 | 417 | 'rev': ctx.rev(), |
|
418 | 418 | 'node': hex(node), |
|
419 | 419 | 'author': ctx.user(), |
|
420 | 420 | 'desc': ctx.description(), |
|
421 | 421 | 'date': ctx.date(), |
|
422 | 422 | 'extra': ctx.extra(), |
|
423 | 423 | 'phase': ctx.phasestr(), |
|
424 | 424 | 'obsolete': ctx.obsolete(), |
|
425 | 425 | 'succsandmarkers': succsandmarkers, |
|
426 | 426 | 'instabilities': templateutil.hybridlist(ctx.instabilities(), |
|
427 | 427 | name='instability'), |
|
428 | 428 | 'whyunstable': whyunstable, |
|
429 | 429 | 'branch': nodebranchnodefault(ctx), |
|
430 | 430 | 'inbranch': nodeinbranch(repo, ctx), |
|
431 | 431 | 'branches': nodebranchdict(repo, ctx), |
|
432 | 432 | 'tags': nodetagsdict(repo, node), |
|
433 | 433 | 'bookmarks': nodebookmarksdict(repo, node), |
|
434 | 434 | 'parent': lambda **x: parents(ctx), |
|
435 | 435 | 'child': lambda **x: children(ctx), |
|
436 | 436 | } |
|
437 | 437 | |
|
438 | 438 | def changelistentry(web, ctx): |
|
439 | 439 | '''Obtain a dictionary to be used for entries in a changelist. |
|
440 | 440 | |
|
441 | 441 | This function is called when producing items for the "entries" list passed |
|
442 | 442 | to the "shortlog" and "changelog" templates. |
|
443 | 443 | ''' |
|
444 | 444 | repo = web.repo |
|
445 | 445 | rev = ctx.rev() |
|
446 | 446 | n = ctx.node() |
|
447 | 447 | showtags = showtag(repo, 'changelogtag', n) |
|
448 | 448 | files = listfilediffs(ctx.files(), n, web.maxfiles) |
|
449 | 449 | |
|
450 | 450 | entry = commonentry(repo, ctx) |
|
451 | 451 | entry.update( |
|
452 | 452 | allparents=lambda **x: parents(ctx), |
|
453 | 453 | parent=lambda **x: parents(ctx, rev - 1), |
|
454 | 454 | child=lambda **x: children(ctx, rev + 1), |
|
455 | 455 | changelogtag=showtags, |
|
456 | 456 | files=files, |
|
457 | 457 | ) |
|
458 | 458 | return entry |
|
459 | 459 | |
|
460 | 460 | def changelistentries(web, revs, maxcount, parityfn): |
|
461 | 461 | """Emit up to N records for an iterable of revisions.""" |
|
462 | 462 | repo = web.repo |
|
463 | 463 | |
|
464 | 464 | count = 0 |
|
465 | 465 | for rev in revs: |
|
466 | 466 | if count >= maxcount: |
|
467 | 467 | break |
|
468 | 468 | |
|
469 | 469 | count += 1 |
|
470 | 470 | |
|
471 | 471 | entry = changelistentry(web, repo[rev]) |
|
472 | 472 | entry['parity'] = next(parityfn) |
|
473 | 473 | |
|
474 | 474 | yield entry |
|
475 | 475 | |
|
476 | 476 | def symrevorshortnode(req, ctx): |
|
477 | 477 | if 'node' in req.qsparams: |
|
478 | 478 | return templatefilters.revescape(req.qsparams['node']) |
|
479 | 479 | else: |
|
480 | 480 | return short(ctx.node()) |
|
481 | 481 | |
|
482 | 482 | def _listfilesgen(context, ctx, stripecount): |
|
483 | 483 | parity = paritygen(stripecount) |
|
484 | 484 | for blockno, f in enumerate(ctx.files()): |
|
485 | 485 | template = 'filenodelink' if f in ctx else 'filenolink' |
|
486 | 486 | yield context.process(template, { |
|
487 | 487 | 'node': ctx.hex(), |
|
488 | 488 | 'file': f, |
|
489 | 489 | 'blockno': blockno + 1, |
|
490 | 490 | 'parity': next(parity), |
|
491 | 491 | }) |
|
492 | 492 | |
|
493 | 493 | def changesetentry(web, ctx): |
|
494 | 494 | '''Obtain a dictionary to be used to render the "changeset" template.''' |
|
495 | 495 | |
|
496 | 496 | showtags = showtag(web.repo, 'changesettag', ctx.node()) |
|
497 | 497 | showbookmarks = showbookmark(web.repo, 'changesetbookmark', ctx.node()) |
|
498 | 498 | showbranch = nodebranchnodefault(ctx) |
|
499 | 499 | |
|
500 | 500 | basectx = basechangectx(web.repo, web.req) |
|
501 | 501 | if basectx is None: |
|
502 | 502 | basectx = ctx.p1() |
|
503 | 503 | |
|
504 | 504 | style = web.config('web', 'style') |
|
505 | 505 | if 'style' in web.req.qsparams: |
|
506 | 506 | style = web.req.qsparams['style'] |
|
507 | 507 | |
|
508 | 508 | diff = diffs(web, ctx, basectx, None, style) |
|
509 | 509 | |
|
510 | 510 | parity = paritygen(web.stripecount) |
|
511 | 511 | diffstatsgen = diffstatgen(ctx, basectx) |
|
512 | 512 | diffstats = diffstat(ctx, diffstatsgen, parity) |
|
513 | 513 | |
|
514 | 514 | return dict( |
|
515 | 515 | diff=diff, |
|
516 | 516 | symrev=symrevorshortnode(web.req, ctx), |
|
517 | 517 | basenode=basectx.hex(), |
|
518 | 518 | changesettag=showtags, |
|
519 | 519 | changesetbookmark=showbookmarks, |
|
520 | 520 | changesetbranch=showbranch, |
|
521 | 521 | files=templateutil.mappedgenerator(_listfilesgen, |
|
522 | 522 | args=(ctx, web.stripecount)), |
|
523 | 523 | diffsummary=lambda **x: diffsummary(diffstatsgen), |
|
524 | 524 | diffstat=diffstats, |
|
525 | 525 | archives=web.archivelist(ctx.hex()), |
|
526 | 526 | **pycompat.strkwargs(commonentry(web.repo, ctx))) |
|
527 | 527 | |
|
528 | 528 | def _listfilediffsgen(context, files, node, max): |
|
529 | 529 | for f in files[:max]: |
|
530 | 530 | yield context.process('filedifflink', {'node': hex(node), 'file': f}) |
|
531 | 531 | if len(files) > max: |
|
532 | 532 | yield context.process('fileellipses', {}) |
|
533 | 533 | |
|
534 | 534 | def listfilediffs(files, node, max): |
|
535 | 535 | return templateutil.mappedgenerator(_listfilediffsgen, |
|
536 | 536 | args=(files, node, max)) |
|
537 | 537 | |
|
538 | 538 | def _prettyprintdifflines(context, lines, blockno, lineidprefix): |
|
539 | 539 | for lineno, l in enumerate(lines, 1): |
|
540 | 540 | difflineno = "%d.%d" % (blockno, lineno) |
|
541 | 541 | if l.startswith('+'): |
|
542 | 542 | ltype = "difflineplus" |
|
543 | 543 | elif l.startswith('-'): |
|
544 | 544 | ltype = "difflineminus" |
|
545 | 545 | elif l.startswith('@'): |
|
546 | 546 | ltype = "difflineat" |
|
547 | 547 | else: |
|
548 | 548 | ltype = "diffline" |
|
549 | 549 | yield context.process(ltype, { |
|
550 | 550 | 'line': l, |
|
551 | 551 | 'lineno': lineno, |
|
552 | 552 | 'lineid': lineidprefix + "l%s" % difflineno, |
|
553 | 553 | 'linenumber': "% 8s" % difflineno, |
|
554 | 554 | }) |
|
555 | 555 | |
|
556 | 556 | def _diffsgen(context, repo, ctx, basectx, files, style, stripecount, |
|
557 | 557 | linerange, lineidprefix): |
|
558 | 558 | if files: |
|
559 | 559 | m = match.exact(repo.root, repo.getcwd(), files) |
|
560 | 560 | else: |
|
561 | 561 | m = match.always(repo.root, repo.getcwd()) |
|
562 | 562 | |
|
563 | 563 | diffopts = patch.diffopts(repo.ui, untrusted=True) |
|
564 | 564 | node1 = basectx.node() |
|
565 | 565 | node2 = ctx.node() |
|
566 | 566 | parity = paritygen(stripecount) |
|
567 | 567 | |
|
568 | 568 | diffhunks = patch.diffhunks(repo, node1, node2, m, opts=diffopts) |
|
569 | 569 | for blockno, (fctx1, fctx2, header, hunks) in enumerate(diffhunks, 1): |
|
570 | 570 | if style != 'raw': |
|
571 | 571 | header = header[1:] |
|
572 | 572 | lines = [h + '\n' for h in header] |
|
573 | 573 | for hunkrange, hunklines in hunks: |
|
574 | 574 | if linerange is not None and hunkrange is not None: |
|
575 | 575 | s1, l1, s2, l2 = hunkrange |
|
576 | 576 | if not mdiff.hunkinrange((s2, l2), linerange): |
|
577 | 577 | continue |
|
578 | 578 | lines.extend(hunklines) |
|
579 | 579 | if lines: |
|
580 | 580 | l = templateutil.mappedgenerator(_prettyprintdifflines, |
|
581 | 581 | args=(lines, blockno, |
|
582 | 582 | lineidprefix)) |
|
583 | 583 | yield { |
|
584 | 584 | 'parity': next(parity), |
|
585 | 585 | 'blockno': blockno, |
|
586 | 586 | 'lines': l, |
|
587 | 587 | } |
|
588 | 588 | |
|
589 | 589 | def diffs(web, ctx, basectx, files, style, linerange=None, lineidprefix=''): |
|
590 | 590 | args = (web.repo, ctx, basectx, files, style, web.stripecount, |
|
591 | 591 | linerange, lineidprefix) |
|
592 | 592 | return templateutil.mappinggenerator(_diffsgen, args=args, name='diffblock') |
|
593 | 593 | |
|
594 | 594 | def _compline(type, leftlineno, leftline, rightlineno, rightline): |
|
595 | 595 | lineid = leftlineno and ("l%d" % leftlineno) or '' |
|
596 | 596 | lineid += rightlineno and ("r%d" % rightlineno) or '' |
|
597 | 597 | llno = '%d' % leftlineno if leftlineno else '' |
|
598 | 598 | rlno = '%d' % rightlineno if rightlineno else '' |
|
599 | 599 | return { |
|
600 | 600 | 'type': type, |
|
601 | 601 | 'lineid': lineid, |
|
602 | 602 | 'leftlineno': leftlineno, |
|
603 | 603 | 'leftlinenumber': "% 6s" % llno, |
|
604 | 604 | 'leftline': leftline or '', |
|
605 | 605 | 'rightlineno': rightlineno, |
|
606 | 606 | 'rightlinenumber': "% 6s" % rlno, |
|
607 | 607 | 'rightline': rightline or '', |
|
608 | 608 | } |
|
609 | 609 | |
|
610 | 610 | def _getcompblockgen(context, leftlines, rightlines, opcodes): |
|
611 | 611 | for type, llo, lhi, rlo, rhi in opcodes: |
|
612 | 612 | len1 = lhi - llo |
|
613 | 613 | len2 = rhi - rlo |
|
614 | 614 | count = min(len1, len2) |
|
615 | 615 | for i in xrange(count): |
|
616 | 616 | yield _compline(type=type, |
|
617 | 617 | leftlineno=llo + i + 1, |
|
618 | 618 | leftline=leftlines[llo + i], |
|
619 | 619 | rightlineno=rlo + i + 1, |
|
620 | 620 | rightline=rightlines[rlo + i]) |
|
621 | 621 | if len1 > len2: |
|
622 | 622 | for i in xrange(llo + count, lhi): |
|
623 | 623 | yield _compline(type=type, |
|
624 | 624 | leftlineno=i + 1, |
|
625 | 625 | leftline=leftlines[i], |
|
626 | 626 | rightlineno=None, |
|
627 | 627 | rightline=None) |
|
628 | 628 | elif len2 > len1: |
|
629 | 629 | for i in xrange(rlo + count, rhi): |
|
630 | 630 | yield _compline(type=type, |
|
631 | 631 | leftlineno=None, |
|
632 | 632 | leftline=None, |
|
633 | 633 | rightlineno=i + 1, |
|
634 | 634 | rightline=rightlines[i]) |
|
635 | 635 | |
|
636 | 636 | def _getcompblock(leftlines, rightlines, opcodes): |
|
637 | 637 | args = (leftlines, rightlines, opcodes) |
|
638 | 638 | return templateutil.mappinggenerator(_getcompblockgen, args=args, |
|
639 | 639 | name='comparisonline') |
|
640 | 640 | |
|
641 | 641 | def _comparegen(context, contextnum, leftlines, rightlines): |
|
642 | 642 | '''Generator function that provides side-by-side comparison data.''' |
|
643 | 643 | s = difflib.SequenceMatcher(None, leftlines, rightlines) |
|
644 | 644 | if contextnum < 0: |
|
645 | 645 | l = _getcompblock(leftlines, rightlines, s.get_opcodes()) |
|
646 | 646 | yield {'lines': l} |
|
647 | 647 | else: |
|
648 | 648 | for oc in s.get_grouped_opcodes(n=contextnum): |
|
649 | 649 | l = _getcompblock(leftlines, rightlines, oc) |
|
650 | 650 | yield {'lines': l} |
|
651 | 651 | |
|
652 | 652 | def compare(contextnum, leftlines, rightlines): |
|
653 | 653 | args = (contextnum, leftlines, rightlines) |
|
654 | 654 | return templateutil.mappinggenerator(_comparegen, args=args, |
|
655 | 655 | name='comparisonblock') |
|
656 | 656 | |
|
657 | 657 | def diffstatgen(ctx, basectx): |
|
658 | 658 | '''Generator function that provides the diffstat data.''' |
|
659 | 659 | |
|
660 | 660 | stats = patch.diffstatdata( |
|
661 | 661 | util.iterlines(ctx.diff(basectx, noprefix=False))) |
|
662 | 662 | maxname, maxtotal, addtotal, removetotal, binary = patch.diffstatsum(stats) |
|
663 | 663 | while True: |
|
664 | 664 | yield stats, maxname, maxtotal, addtotal, removetotal, binary |
|
665 | 665 | |
|
666 | 666 | def diffsummary(statgen): |
|
667 | 667 | '''Return a short summary of the diff.''' |
|
668 | 668 | |
|
669 | 669 | stats, maxname, maxtotal, addtotal, removetotal, binary = next(statgen) |
|
670 | 670 | return _(' %d files changed, %d insertions(+), %d deletions(-)\n') % ( |
|
671 | 671 | len(stats), addtotal, removetotal) |
|
672 | 672 | |
|
673 | 673 | def _diffstattmplgen(context, ctx, statgen, parity): |
|
674 | 674 | stats, maxname, maxtotal, addtotal, removetotal, binary = next(statgen) |
|
675 | 675 | files = ctx.files() |
|
676 | 676 | |
|
677 | 677 | def pct(i): |
|
678 | 678 | if maxtotal == 0: |
|
679 | 679 | return 0 |
|
680 | 680 | return (float(i) / maxtotal) * 100 |
|
681 | 681 | |
|
682 | 682 | fileno = 0 |
|
683 | 683 | for filename, adds, removes, isbinary in stats: |
|
684 | 684 | template = 'diffstatlink' if filename in files else 'diffstatnolink' |
|
685 | 685 | total = adds + removes |
|
686 | 686 | fileno += 1 |
|
687 | 687 | yield context.process(template, { |
|
688 | 688 | 'node': ctx.hex(), |
|
689 | 689 | 'file': filename, |
|
690 | 690 | 'fileno': fileno, |
|
691 | 691 | 'total': total, |
|
692 | 692 | 'addpct': pct(adds), |
|
693 | 693 | 'removepct': pct(removes), |
|
694 | 694 | 'parity': next(parity), |
|
695 | 695 | }) |
|
696 | 696 | |
|
697 | 697 | def diffstat(ctx, statgen, parity): |
|
698 | 698 | '''Return a diffstat template for each file in the diff.''' |
|
699 | 699 | args = (ctx, statgen, parity) |
|
700 | 700 | return templateutil.mappedgenerator(_diffstattmplgen, args=args) |
|
701 | 701 | |
|
702 | 702 | class sessionvars(templateutil.wrapped): |
|
703 | 703 | def __init__(self, vars, start='?'): |
|
704 | 704 | self._start = start |
|
705 | 705 | self._vars = vars |
|
706 | 706 | |
|
707 | 707 | def __getitem__(self, key): |
|
708 | 708 | return self._vars[key] |
|
709 | 709 | |
|
710 | 710 | def __setitem__(self, key, value): |
|
711 | 711 | self._vars[key] = value |
|
712 | 712 | |
|
713 | 713 | def __copy__(self): |
|
714 | 714 | return sessionvars(copy.copy(self._vars), self._start) |
|
715 | 715 | |
|
716 | 716 | def contains(self, context, mapping, item): |
|
717 | 717 | item = templateutil.unwrapvalue(context, mapping, item) |
|
718 | 718 | return item in self._vars |
|
719 | 719 | |
|
720 | 720 | def getmember(self, context, mapping, key): |
|
721 | 721 | key = templateutil.unwrapvalue(context, mapping, key) |
|
722 | 722 | return self._vars.get(key) |
|
723 | 723 | |
|
724 | 724 | def getmin(self, context, mapping): |
|
725 | 725 | raise error.ParseError(_('not comparable')) |
|
726 | 726 | |
|
727 | 727 | def getmax(self, context, mapping): |
|
728 | 728 | raise error.ParseError(_('not comparable')) |
|
729 | 729 | |
|
730 | def filter(self, context, mapping, select): | |
|
731 | # implement if necessary | |
|
732 | raise error.ParseError(_('not filterable')) | |
|
733 | ||
|
730 | 734 | def itermaps(self, context): |
|
731 | 735 | separator = self._start |
|
732 | 736 | for key, value in sorted(self._vars.iteritems()): |
|
733 | 737 | yield {'name': key, |
|
734 | 738 | 'value': pycompat.bytestr(value), |
|
735 | 739 | 'separator': separator, |
|
736 | 740 | } |
|
737 | 741 | separator = '&' |
|
738 | 742 | |
|
739 | 743 | def join(self, context, mapping, sep): |
|
740 | 744 | # could be '{separator}{name}={value|urlescape}' |
|
741 | 745 | raise error.ParseError(_('not displayable without template')) |
|
742 | 746 | |
|
743 | 747 | def show(self, context, mapping): |
|
744 | 748 | return self.join(context, '') |
|
745 | 749 | |
|
746 | 750 | def tobool(self, context, mapping): |
|
747 | 751 | return bool(self._vars) |
|
748 | 752 | |
|
749 | 753 | def tovalue(self, context, mapping): |
|
750 | 754 | return self._vars |
|
751 | 755 | |
|
752 | 756 | class wsgiui(uimod.ui): |
|
753 | 757 | # default termwidth breaks under mod_wsgi |
|
754 | 758 | def termwidth(self): |
|
755 | 759 | return 80 |
|
756 | 760 | |
|
757 | 761 | def getwebsubs(repo): |
|
758 | 762 | websubtable = [] |
|
759 | 763 | websubdefs = repo.ui.configitems('websub') |
|
760 | 764 | # we must maintain interhg backwards compatibility |
|
761 | 765 | websubdefs += repo.ui.configitems('interhg') |
|
762 | 766 | for key, pattern in websubdefs: |
|
763 | 767 | # grab the delimiter from the character after the "s" |
|
764 | 768 | unesc = pattern[1:2] |
|
765 | 769 | delim = re.escape(unesc) |
|
766 | 770 | |
|
767 | 771 | # identify portions of the pattern, taking care to avoid escaped |
|
768 | 772 | # delimiters. the replace format and flags are optional, but |
|
769 | 773 | # delimiters are required. |
|
770 | 774 | match = re.match( |
|
771 | 775 | br'^s%s(.+)(?:(?<=\\\\)|(?<!\\))%s(.*)%s([ilmsux])*$' |
|
772 | 776 | % (delim, delim, delim), pattern) |
|
773 | 777 | if not match: |
|
774 | 778 | repo.ui.warn(_("websub: invalid pattern for %s: %s\n") |
|
775 | 779 | % (key, pattern)) |
|
776 | 780 | continue |
|
777 | 781 | |
|
778 | 782 | # we need to unescape the delimiter for regexp and format |
|
779 | 783 | delim_re = re.compile(br'(?<!\\)\\%s' % delim) |
|
780 | 784 | regexp = delim_re.sub(unesc, match.group(1)) |
|
781 | 785 | format = delim_re.sub(unesc, match.group(2)) |
|
782 | 786 | |
|
783 | 787 | # the pattern allows for 6 regexp flags, so set them if necessary |
|
784 | 788 | flagin = match.group(3) |
|
785 | 789 | flags = 0 |
|
786 | 790 | if flagin: |
|
787 | 791 | for flag in flagin.upper(): |
|
788 | 792 | flags |= re.__dict__[flag] |
|
789 | 793 | |
|
790 | 794 | try: |
|
791 | 795 | regexp = re.compile(regexp, flags) |
|
792 | 796 | websubtable.append((regexp, format)) |
|
793 | 797 | except re.error: |
|
794 | 798 | repo.ui.warn(_("websub: invalid regexp for %s: %s\n") |
|
795 | 799 | % (key, regexp)) |
|
796 | 800 | return websubtable |
|
797 | 801 | |
|
798 | 802 | def getgraphnode(repo, ctx): |
|
799 | 803 | return (templatekw.getgraphnodecurrent(repo, ctx) + |
|
800 | 804 | templatekw.getgraphnodesymbol(ctx)) |
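The hunk above ends hgweb/webutil.py: sessionvars gains a filter() stub that raises "not filterable", satisfying the wrapped.filter(context, mapping, select) contract that the new filter() template function in templatefuncs.py (below) relies on. As a rough standalone illustration only (plain Python, no Mercurial types; the helper name is made up for this sketch), the select/tobool pattern amounts to keeping the truthy members of an iterable:

    # illustrative sketch of the "keep members whose boolean value is true"
    # behaviour behind the new filter() template function
    def keep_truthy(iterable, select=bool):
        """Return the members of iterable for which select() is true."""
        return [item for item in iterable if select(item)]

    print(keep_truthy(['a', '', 'b', None, 'c']))   # -> ['a', 'b', 'c']

In template syntax this is expected to read as {filter(list)}, for example something like {join(filter(splitlines(desc)), '|')} to drop blank description lines, while a non-filterable wrapped value such as sessionvars raises ParseError('not filterable').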
@@ -1,698 +1,709 @@ templatefuncs.py
|
1 | 1 | # templatefuncs.py - common template functions |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import re |
|
11 | 11 | |
|
12 | 12 | from .i18n import _ |
|
13 | 13 | from .node import ( |
|
14 | 14 | bin, |
|
15 | 15 | wdirid, |
|
16 | 16 | ) |
|
17 | 17 | from . import ( |
|
18 | 18 | color, |
|
19 | 19 | encoding, |
|
20 | 20 | error, |
|
21 | 21 | minirst, |
|
22 | 22 | obsutil, |
|
23 | 23 | registrar, |
|
24 | 24 | revset as revsetmod, |
|
25 | 25 | revsetlang, |
|
26 | 26 | scmutil, |
|
27 | 27 | templatefilters, |
|
28 | 28 | templatekw, |
|
29 | 29 | templateutil, |
|
30 | 30 | util, |
|
31 | 31 | ) |
|
32 | 32 | from .utils import ( |
|
33 | 33 | dateutil, |
|
34 | 34 | stringutil, |
|
35 | 35 | ) |
|
36 | 36 | |
|
37 | 37 | evalrawexp = templateutil.evalrawexp |
|
38 | 38 | evalwrapped = templateutil.evalwrapped |
|
39 | 39 | evalfuncarg = templateutil.evalfuncarg |
|
40 | 40 | evalboolean = templateutil.evalboolean |
|
41 | 41 | evaldate = templateutil.evaldate |
|
42 | 42 | evalinteger = templateutil.evalinteger |
|
43 | 43 | evalstring = templateutil.evalstring |
|
44 | 44 | evalstringliteral = templateutil.evalstringliteral |
|
45 | 45 | |
|
46 | 46 | # dict of template built-in functions |
|
47 | 47 | funcs = {} |
|
48 | 48 | templatefunc = registrar.templatefunc(funcs) |
|
49 | 49 | |
|
50 | 50 | @templatefunc('date(date[, fmt])') |
|
51 | 51 | def date(context, mapping, args): |
|
52 | 52 | """Format a date. See :hg:`help dates` for formatting |
|
53 | 53 | strings. The default is a Unix date format, including the timezone: |
|
54 | 54 | "Mon Sep 04 15:13:13 2006 0700".""" |
|
55 | 55 | if not (1 <= len(args) <= 2): |
|
56 | 56 | # i18n: "date" is a keyword |
|
57 | 57 | raise error.ParseError(_("date expects one or two arguments")) |
|
58 | 58 | |
|
59 | 59 | date = evaldate(context, mapping, args[0], |
|
60 | 60 | # i18n: "date" is a keyword |
|
61 | 61 | _("date expects a date information")) |
|
62 | 62 | fmt = None |
|
63 | 63 | if len(args) == 2: |
|
64 | 64 | fmt = evalstring(context, mapping, args[1]) |
|
65 | 65 | if fmt is None: |
|
66 | 66 | return dateutil.datestr(date) |
|
67 | 67 | else: |
|
68 | 68 | return dateutil.datestr(date, fmt) |
|
69 | 69 | |
|
70 | 70 | @templatefunc('dict([[key=]value...])', argspec='*args **kwargs') |
|
71 | 71 | def dict_(context, mapping, args): |
|
72 | 72 | """Construct a dict from key-value pairs. A key may be omitted if |
|
73 | 73 | a value expression can provide an unambiguous name.""" |
|
74 | 74 | data = util.sortdict() |
|
75 | 75 | |
|
76 | 76 | for v in args['args']: |
|
77 | 77 | k = templateutil.findsymbolicname(v) |
|
78 | 78 | if not k: |
|
79 | 79 | raise error.ParseError(_('dict key cannot be inferred')) |
|
80 | 80 | if k in data or k in args['kwargs']: |
|
81 | 81 | raise error.ParseError(_("duplicated dict key '%s' inferred") % k) |
|
82 | 82 | data[k] = evalfuncarg(context, mapping, v) |
|
83 | 83 | |
|
84 | 84 | data.update((k, evalfuncarg(context, mapping, v)) |
|
85 | 85 | for k, v in args['kwargs'].iteritems()) |
|
86 | 86 | return templateutil.hybriddict(data) |
|
87 | 87 | |
|
88 | 88 | @templatefunc('diff([includepattern [, excludepattern]])', requires={'ctx'}) |
|
89 | 89 | def diff(context, mapping, args): |
|
90 | 90 | """Show a diff, optionally |
|
91 | 91 | specifying files to include or exclude.""" |
|
92 | 92 | if len(args) > 2: |
|
93 | 93 | # i18n: "diff" is a keyword |
|
94 | 94 | raise error.ParseError(_("diff expects zero, one, or two arguments")) |
|
95 | 95 | |
|
96 | 96 | def getpatterns(i): |
|
97 | 97 | if i < len(args): |
|
98 | 98 | s = evalstring(context, mapping, args[i]).strip() |
|
99 | 99 | if s: |
|
100 | 100 | return [s] |
|
101 | 101 | return [] |
|
102 | 102 | |
|
103 | 103 | ctx = context.resource(mapping, 'ctx') |
|
104 | 104 | chunks = ctx.diff(match=ctx.match([], getpatterns(0), getpatterns(1))) |
|
105 | 105 | |
|
106 | 106 | return ''.join(chunks) |
|
107 | 107 | |
|
108 | 108 | @templatefunc('extdata(source)', argspec='source', requires={'ctx', 'cache'}) |
|
109 | 109 | def extdata(context, mapping, args): |
|
110 | 110 | """Show a text read from the specified extdata source. (EXPERIMENTAL)""" |
|
111 | 111 | if 'source' not in args: |
|
112 | 112 | # i18n: "extdata" is a keyword |
|
113 | 113 | raise error.ParseError(_('extdata expects one argument')) |
|
114 | 114 | |
|
115 | 115 | source = evalstring(context, mapping, args['source']) |
|
116 | 116 | if not source: |
|
117 | 117 | sym = templateutil.findsymbolicname(args['source']) |
|
118 | 118 | if sym: |
|
119 | 119 | raise error.ParseError(_('empty data source specified'), |
|
120 | 120 | hint=_("did you mean extdata('%s')?") % sym) |
|
121 | 121 | else: |
|
122 | 122 | raise error.ParseError(_('empty data source specified')) |
|
123 | 123 | cache = context.resource(mapping, 'cache').setdefault('extdata', {}) |
|
124 | 124 | ctx = context.resource(mapping, 'ctx') |
|
125 | 125 | if source in cache: |
|
126 | 126 | data = cache[source] |
|
127 | 127 | else: |
|
128 | 128 | data = cache[source] = scmutil.extdatasource(ctx.repo(), source) |
|
129 | 129 | return data.get(ctx.rev(), '') |
|
130 | 130 | |
|
131 | 131 | @templatefunc('files(pattern)', requires={'ctx'}) |
|
132 | 132 | def files(context, mapping, args): |
|
133 | 133 | """All files of the current changeset matching the pattern. See |
|
134 | 134 | :hg:`help patterns`.""" |
|
135 | 135 | if not len(args) == 1: |
|
136 | 136 | # i18n: "files" is a keyword |
|
137 | 137 | raise error.ParseError(_("files expects one argument")) |
|
138 | 138 | |
|
139 | 139 | raw = evalstring(context, mapping, args[0]) |
|
140 | 140 | ctx = context.resource(mapping, 'ctx') |
|
141 | 141 | m = ctx.match([raw]) |
|
142 | 142 | files = list(ctx.matches(m)) |
|
143 | 143 | return templateutil.compatlist(context, mapping, "file", files) |
|
144 | 144 | |
|
145 | 145 | @templatefunc('fill(text[, width[, initialident[, hangindent]]])') |
|
146 | 146 | def fill(context, mapping, args): |
|
147 | 147 | """Fill many |
|
148 | 148 | paragraphs with optional indentation. See the "fill" filter.""" |
|
149 | 149 | if not (1 <= len(args) <= 4): |
|
150 | 150 | # i18n: "fill" is a keyword |
|
151 | 151 | raise error.ParseError(_("fill expects one to four arguments")) |
|
152 | 152 | |
|
153 | 153 | text = evalstring(context, mapping, args[0]) |
|
154 | 154 | width = 76 |
|
155 | 155 | initindent = '' |
|
156 | 156 | hangindent = '' |
|
157 | 157 | if 2 <= len(args) <= 4: |
|
158 | 158 | width = evalinteger(context, mapping, args[1], |
|
159 | 159 | # i18n: "fill" is a keyword |
|
160 | 160 | _("fill expects an integer width")) |
|
161 | 161 | try: |
|
162 | 162 | initindent = evalstring(context, mapping, args[2]) |
|
163 | 163 | hangindent = evalstring(context, mapping, args[3]) |
|
164 | 164 | except IndexError: |
|
165 | 165 | pass |
|
166 | 166 | |
|
167 | 167 | return templatefilters.fill(text, width, initindent, hangindent) |
|
168 | 168 | |
|
169 | @templatefunc('filter(iterable)') | |
|
170 | def filter_(context, mapping, args): | |
|
171 | """Remove empty elements from a list or a dict.""" | |
|
172 | if len(args) != 1: | |
|
173 | # i18n: "filter" is a keyword | |
|
174 | raise error.ParseError(_("filter expects one argument")) | |
|
175 | iterable = evalwrapped(context, mapping, args[0]) | |
|
176 | def select(w): | |
|
177 | return w.tobool(context, mapping) | |
|
178 | return iterable.filter(context, mapping, select) | |
|
179 | ||
|
169 | 180 | @templatefunc('formatnode(node)', requires={'ui'}) |
|
170 | 181 | def formatnode(context, mapping, args): |
|
171 | 182 | """Obtain the preferred form of a changeset hash. (DEPRECATED)""" |
|
172 | 183 | if len(args) != 1: |
|
173 | 184 | # i18n: "formatnode" is a keyword |
|
174 | 185 | raise error.ParseError(_("formatnode expects one argument")) |
|
175 | 186 | |
|
176 | 187 | ui = context.resource(mapping, 'ui') |
|
177 | 188 | node = evalstring(context, mapping, args[0]) |
|
178 | 189 | if ui.debugflag: |
|
179 | 190 | return node |
|
180 | 191 | return templatefilters.short(node) |
|
181 | 192 | |
|
182 | 193 | @templatefunc('mailmap(author)', requires={'repo', 'cache'}) |
|
183 | 194 | def mailmap(context, mapping, args): |
|
184 | 195 | """Return the author, updated according to the value |
|
185 | 196 | set in the .mailmap file""" |
|
186 | 197 | if len(args) != 1: |
|
187 | 198 | raise error.ParseError(_("mailmap expects one argument")) |
|
188 | 199 | |
|
189 | 200 | author = evalstring(context, mapping, args[0]) |
|
190 | 201 | |
|
191 | 202 | cache = context.resource(mapping, 'cache') |
|
192 | 203 | repo = context.resource(mapping, 'repo') |
|
193 | 204 | |
|
194 | 205 | if 'mailmap' not in cache: |
|
195 | 206 | data = repo.wvfs.tryread('.mailmap') |
|
196 | 207 | cache['mailmap'] = stringutil.parsemailmap(data) |
|
197 | 208 | |
|
198 | 209 | return stringutil.mapname(cache['mailmap'], author) |
|
199 | 210 | |
|
200 | 211 | @templatefunc('pad(text, width[, fillchar=\' \'[, left=False]])', |
|
201 | 212 | argspec='text width fillchar left') |
|
202 | 213 | def pad(context, mapping, args): |
|
203 | 214 | """Pad text with a |
|
204 | 215 | fill character.""" |
|
205 | 216 | if 'text' not in args or 'width' not in args: |
|
206 | 217 | # i18n: "pad" is a keyword |
|
207 | 218 | raise error.ParseError(_("pad() expects two to four arguments")) |
|
208 | 219 | |
|
209 | 220 | width = evalinteger(context, mapping, args['width'], |
|
210 | 221 | # i18n: "pad" is a keyword |
|
211 | 222 | _("pad() expects an integer width")) |
|
212 | 223 | |
|
213 | 224 | text = evalstring(context, mapping, args['text']) |
|
214 | 225 | |
|
215 | 226 | left = False |
|
216 | 227 | fillchar = ' ' |
|
217 | 228 | if 'fillchar' in args: |
|
218 | 229 | fillchar = evalstring(context, mapping, args['fillchar']) |
|
219 | 230 | if len(color.stripeffects(fillchar)) != 1: |
|
220 | 231 | # i18n: "pad" is a keyword |
|
221 | 232 | raise error.ParseError(_("pad() expects a single fill character")) |
|
222 | 233 | if 'left' in args: |
|
223 | 234 | left = evalboolean(context, mapping, args['left']) |
|
224 | 235 | |
|
225 | 236 | fillwidth = width - encoding.colwidth(color.stripeffects(text)) |
|
226 | 237 | if fillwidth <= 0: |
|
227 | 238 | return text |
|
228 | 239 | if left: |
|
229 | 240 | return fillchar * fillwidth + text |
|
230 | 241 | else: |
|
231 | 242 | return text + fillchar * fillwidth |
|
232 | 243 | |
|
233 | 244 | @templatefunc('indent(text, indentchars[, firstline])') |
|
234 | 245 | def indent(context, mapping, args): |
|
235 | 246 | """Indents all non-empty lines |
|
236 | 247 | with the characters given in the indentchars string. An optional |
|
237 | 248 | third parameter will override the indent for the first line only |
|
238 | 249 | if present.""" |
|
239 | 250 | if not (2 <= len(args) <= 3): |
|
240 | 251 | # i18n: "indent" is a keyword |
|
241 | 252 | raise error.ParseError(_("indent() expects two or three arguments")) |
|
242 | 253 | |
|
243 | 254 | text = evalstring(context, mapping, args[0]) |
|
244 | 255 | indent = evalstring(context, mapping, args[1]) |
|
245 | 256 | |
|
246 | 257 | if len(args) == 3: |
|
247 | 258 | firstline = evalstring(context, mapping, args[2]) |
|
248 | 259 | else: |
|
249 | 260 | firstline = indent |
|
250 | 261 | |
|
251 | 262 | # the indent function doesn't indent the first line, so we do it here |
|
252 | 263 | return templatefilters.indent(firstline + text, indent) |
|
253 | 264 | |
|
254 | 265 | @templatefunc('get(dict, key)') |
|
255 | 266 | def get(context, mapping, args): |
|
256 | 267 | """Get an attribute/key from an object. Some keywords |
|
257 | 268 | are complex types. This function allows you to obtain the value of an |
|
258 | 269 | attribute on these types.""" |
|
259 | 270 | if len(args) != 2: |
|
260 | 271 | # i18n: "get" is a keyword |
|
261 | 272 | raise error.ParseError(_("get() expects two arguments")) |
|
262 | 273 | |
|
263 | 274 | dictarg = evalwrapped(context, mapping, args[0]) |
|
264 | 275 | key = evalrawexp(context, mapping, args[1]) |
|
265 | 276 | try: |
|
266 | 277 | return dictarg.getmember(context, mapping, key) |
|
267 | 278 | except error.ParseError as err: |
|
268 | 279 | # i18n: "get" is a keyword |
|
269 | 280 | hint = _("get() expects a dict as first argument") |
|
270 | 281 | raise error.ParseError(bytes(err), hint=hint) |
|
271 | 282 | |
|
272 | 283 | @templatefunc('if(expr, then[, else])') |
|
273 | 284 | def if_(context, mapping, args): |
|
274 | 285 | """Conditionally execute based on the result of |
|
275 | 286 | an expression.""" |
|
276 | 287 | if not (2 <= len(args) <= 3): |
|
277 | 288 | # i18n: "if" is a keyword |
|
278 | 289 | raise error.ParseError(_("if expects two or three arguments")) |
|
279 | 290 | |
|
280 | 291 | test = evalboolean(context, mapping, args[0]) |
|
281 | 292 | if test: |
|
282 | 293 | return evalrawexp(context, mapping, args[1]) |
|
283 | 294 | elif len(args) == 3: |
|
284 | 295 | return evalrawexp(context, mapping, args[2]) |
|
285 | 296 | |
|
286 | 297 | @templatefunc('ifcontains(needle, haystack, then[, else])') |
|
287 | 298 | def ifcontains(context, mapping, args): |
|
288 | 299 | """Conditionally execute based |
|
289 | 300 | on whether the item "needle" is in "haystack".""" |
|
290 | 301 | if not (3 <= len(args) <= 4): |
|
291 | 302 | # i18n: "ifcontains" is a keyword |
|
292 | 303 | raise error.ParseError(_("ifcontains expects three or four arguments")) |
|
293 | 304 | |
|
294 | 305 | haystack = evalwrapped(context, mapping, args[1]) |
|
295 | 306 | try: |
|
296 | 307 | needle = evalrawexp(context, mapping, args[0]) |
|
297 | 308 | found = haystack.contains(context, mapping, needle) |
|
298 | 309 | except error.ParseError: |
|
299 | 310 | found = False |
|
300 | 311 | |
|
301 | 312 | if found: |
|
302 | 313 | return evalrawexp(context, mapping, args[2]) |
|
303 | 314 | elif len(args) == 4: |
|
304 | 315 | return evalrawexp(context, mapping, args[3]) |
|
305 | 316 | |
|
306 | 317 | @templatefunc('ifeq(expr1, expr2, then[, else])') |
|
307 | 318 | def ifeq(context, mapping, args): |
|
308 | 319 | """Conditionally execute based on |
|
309 | 320 | whether 2 items are equivalent.""" |
|
310 | 321 | if not (3 <= len(args) <= 4): |
|
311 | 322 | # i18n: "ifeq" is a keyword |
|
312 | 323 | raise error.ParseError(_("ifeq expects three or four arguments")) |
|
313 | 324 | |
|
314 | 325 | test = evalstring(context, mapping, args[0]) |
|
315 | 326 | match = evalstring(context, mapping, args[1]) |
|
316 | 327 | if test == match: |
|
317 | 328 | return evalrawexp(context, mapping, args[2]) |
|
318 | 329 | elif len(args) == 4: |
|
319 | 330 | return evalrawexp(context, mapping, args[3]) |
|
320 | 331 | |
|
321 | 332 | @templatefunc('join(list, sep)') |
|
322 | 333 | def join(context, mapping, args): |
|
323 | 334 | """Join items in a list with a delimiter.""" |
|
324 | 335 | if not (1 <= len(args) <= 2): |
|
325 | 336 | # i18n: "join" is a keyword |
|
326 | 337 | raise error.ParseError(_("join expects one or two arguments")) |
|
327 | 338 | |
|
328 | 339 | joinset = evalwrapped(context, mapping, args[0]) |
|
329 | 340 | joiner = " " |
|
330 | 341 | if len(args) > 1: |
|
331 | 342 | joiner = evalstring(context, mapping, args[1]) |
|
332 | 343 | return joinset.join(context, mapping, joiner) |
|
333 | 344 | |
|
334 | 345 | @templatefunc('label(label, expr)', requires={'ui'}) |
|
335 | 346 | def label(context, mapping, args): |
|
336 | 347 | """Apply a label to generated content. Content with |
|
337 | 348 | a label applied can result in additional post-processing, such as |
|
338 | 349 | automatic colorization.""" |
|
339 | 350 | if len(args) != 2: |
|
340 | 351 | # i18n: "label" is a keyword |
|
341 | 352 | raise error.ParseError(_("label expects two arguments")) |
|
342 | 353 | |
|
343 | 354 | ui = context.resource(mapping, 'ui') |
|
344 | 355 | thing = evalstring(context, mapping, args[1]) |
|
345 | 356 | # preserve unknown symbol as literal so effects like 'red', 'bold', |
|
346 | 357 | # etc. don't need to be quoted |
|
347 | 358 | label = evalstringliteral(context, mapping, args[0]) |
|
348 | 359 | |
|
349 | 360 | return ui.label(thing, label) |
|
350 | 361 | |
|
351 | 362 | @templatefunc('latesttag([pattern])') |
|
352 | 363 | def latesttag(context, mapping, args): |
|
353 | 364 | """The global tags matching the given pattern on the |
|
354 | 365 | most recent globally tagged ancestor of this changeset. |
|
355 | 366 | If no such tags exist, the "{tag}" template resolves to |
|
356 | 367 | the string "null". See :hg:`help revisions.patterns` for the pattern |
|
357 | 368 | syntax. |
|
358 | 369 | """ |
|
359 | 370 | if len(args) > 1: |
|
360 | 371 | # i18n: "latesttag" is a keyword |
|
361 | 372 | raise error.ParseError(_("latesttag expects at most one argument")) |
|
362 | 373 | |
|
363 | 374 | pattern = None |
|
364 | 375 | if len(args) == 1: |
|
365 | 376 | pattern = evalstring(context, mapping, args[0]) |
|
366 | 377 | return templatekw.showlatesttags(context, mapping, pattern) |
|
367 | 378 | |
|
368 | 379 | @templatefunc('localdate(date[, tz])') |
|
369 | 380 | def localdate(context, mapping, args): |
|
370 | 381 | """Converts a date to the specified timezone. |
|
371 | 382 | The default is local date.""" |
|
372 | 383 | if not (1 <= len(args) <= 2): |
|
373 | 384 | # i18n: "localdate" is a keyword |
|
374 | 385 | raise error.ParseError(_("localdate expects one or two arguments")) |
|
375 | 386 | |
|
376 | 387 | date = evaldate(context, mapping, args[0], |
|
377 | 388 | # i18n: "localdate" is a keyword |
|
378 | 389 | _("localdate expects a date information")) |
|
379 | 390 | if len(args) >= 2: |
|
380 | 391 | tzoffset = None |
|
381 | 392 | tz = evalfuncarg(context, mapping, args[1]) |
|
382 | 393 | if isinstance(tz, bytes): |
|
383 | 394 | tzoffset, remainder = dateutil.parsetimezone(tz) |
|
384 | 395 | if remainder: |
|
385 | 396 | tzoffset = None |
|
386 | 397 | if tzoffset is None: |
|
387 | 398 | try: |
|
388 | 399 | tzoffset = int(tz) |
|
389 | 400 | except (TypeError, ValueError): |
|
390 | 401 | # i18n: "localdate" is a keyword |
|
391 | 402 | raise error.ParseError(_("localdate expects a timezone")) |
|
392 | 403 | else: |
|
393 | 404 | tzoffset = dateutil.makedate()[1] |
|
394 | 405 | return templateutil.date((date[0], tzoffset)) |
|
395 | 406 | |
|
396 | 407 | @templatefunc('max(iterable)') |
|
397 | 408 | def max_(context, mapping, args, **kwargs): |
|
398 | 409 | """Return the max of an iterable""" |
|
399 | 410 | if len(args) != 1: |
|
400 | 411 | # i18n: "max" is a keyword |
|
401 | 412 | raise error.ParseError(_("max expects one argument")) |
|
402 | 413 | |
|
403 | 414 | iterable = evalwrapped(context, mapping, args[0]) |
|
404 | 415 | try: |
|
405 | 416 | return iterable.getmax(context, mapping) |
|
406 | 417 | except error.ParseError as err: |
|
407 | 418 | # i18n: "max" is a keyword |
|
408 | 419 | hint = _("max first argument should be an iterable") |
|
409 | 420 | raise error.ParseError(bytes(err), hint=hint) |
|
410 | 421 | |
|
411 | 422 | @templatefunc('min(iterable)') |
|
412 | 423 | def min_(context, mapping, args, **kwargs): |
|
413 | 424 | """Return the min of an iterable""" |
|
414 | 425 | if len(args) != 1: |
|
415 | 426 | # i18n: "min" is a keyword |
|
416 | 427 | raise error.ParseError(_("min expects one argument")) |
|
417 | 428 | |
|
418 | 429 | iterable = evalwrapped(context, mapping, args[0]) |
|
419 | 430 | try: |
|
420 | 431 | return iterable.getmin(context, mapping) |
|
421 | 432 | except error.ParseError as err: |
|
422 | 433 | # i18n: "min" is a keyword |
|
423 | 434 | hint = _("min first argument should be an iterable") |
|
424 | 435 | raise error.ParseError(bytes(err), hint=hint) |
|
425 | 436 | |
|
426 | 437 | @templatefunc('mod(a, b)') |
|
427 | 438 | def mod(context, mapping, args): |
|
428 | 439 | """Calculate a mod b such that a / b + a mod b == a""" |
|
429 | 440 | if not len(args) == 2: |
|
430 | 441 | # i18n: "mod" is a keyword |
|
431 | 442 | raise error.ParseError(_("mod expects two arguments")) |
|
432 | 443 | |
|
433 | 444 | func = lambda a, b: a % b |
|
434 | 445 | return templateutil.runarithmetic(context, mapping, |
|
435 | 446 | (func, args[0], args[1])) |
|
436 | 447 | |
|
437 | 448 | @templatefunc('obsfateoperations(markers)') |
|
438 | 449 | def obsfateoperations(context, mapping, args): |
|
439 | 450 | """Compute obsfate related information based on markers (EXPERIMENTAL)""" |
|
440 | 451 | if len(args) != 1: |
|
441 | 452 | # i18n: "obsfateoperations" is a keyword |
|
442 | 453 | raise error.ParseError(_("obsfateoperations expects one argument")) |
|
443 | 454 | |
|
444 | 455 | markers = evalfuncarg(context, mapping, args[0]) |
|
445 | 456 | |
|
446 | 457 | try: |
|
447 | 458 | data = obsutil.markersoperations(markers) |
|
448 | 459 | return templateutil.hybridlist(data, name='operation') |
|
449 | 460 | except (TypeError, KeyError): |
|
450 | 461 | # i18n: "obsfateoperations" is a keyword |
|
451 | 462 | errmsg = _("obsfateoperations first argument should be an iterable") |
|
452 | 463 | raise error.ParseError(errmsg) |
|
453 | 464 | |
|
454 | 465 | @templatefunc('obsfatedate(markers)') |
|
455 | 466 | def obsfatedate(context, mapping, args): |
|
456 | 467 | """Compute obsfate related information based on markers (EXPERIMENTAL)""" |
|
457 | 468 | if len(args) != 1: |
|
458 | 469 | # i18n: "obsfatedate" is a keyword |
|
459 | 470 | raise error.ParseError(_("obsfatedate expects one argument")) |
|
460 | 471 | |
|
461 | 472 | markers = evalfuncarg(context, mapping, args[0]) |
|
462 | 473 | |
|
463 | 474 | try: |
|
464 | 475 | # TODO: maybe this has to be a wrapped list of date wrappers? |
|
465 | 476 | data = obsutil.markersdates(markers) |
|
466 | 477 | return templateutil.hybridlist(data, name='date', fmt='%d %d') |
|
467 | 478 | except (TypeError, KeyError): |
|
468 | 479 | # i18n: "obsfatedate" is a keyword |
|
469 | 480 | errmsg = _("obsfatedate first argument should be an iterable") |
|
470 | 481 | raise error.ParseError(errmsg) |
|
471 | 482 | |
|
472 | 483 | @templatefunc('obsfateusers(markers)') |
|
473 | 484 | def obsfateusers(context, mapping, args): |
|
474 | 485 | """Compute obsfate related information based on markers (EXPERIMENTAL)""" |
|
475 | 486 | if len(args) != 1: |
|
476 | 487 | # i18n: "obsfateusers" is a keyword |
|
477 | 488 | raise error.ParseError(_("obsfateusers expects one argument")) |
|
478 | 489 | |
|
479 | 490 | markers = evalfuncarg(context, mapping, args[0]) |
|
480 | 491 | |
|
481 | 492 | try: |
|
482 | 493 | data = obsutil.markersusers(markers) |
|
483 | 494 | return templateutil.hybridlist(data, name='user') |
|
484 | 495 | except (TypeError, KeyError, ValueError): |
|
485 | 496 | # i18n: "obsfateusers" is a keyword |
|
486 | 497 | msg = _("obsfateusers first argument should be an iterable of " |
|
487 | 498 |                 "obsmarkers") |
|
488 | 499 | raise error.ParseError(msg) |
|
489 | 500 | |
|
490 | 501 | @templatefunc('obsfateverb(successors, markers)') |
|
491 | 502 | def obsfateverb(context, mapping, args): |
|
492 | 503 | """Compute obsfate related information based on successors (EXPERIMENTAL)""" |
|
493 | 504 | if len(args) != 2: |
|
494 | 505 | # i18n: "obsfateverb" is a keyword |
|
495 | 506 | raise error.ParseError(_("obsfateverb expects two arguments")) |
|
496 | 507 | |
|
497 | 508 | successors = evalfuncarg(context, mapping, args[0]) |
|
498 | 509 | markers = evalfuncarg(context, mapping, args[1]) |
|
499 | 510 | |
|
500 | 511 | try: |
|
501 | 512 | return obsutil.obsfateverb(successors, markers) |
|
502 | 513 | except TypeError: |
|
503 | 514 | # i18n: "obsfateverb" is a keyword |
|
504 | 515 | errmsg = _("obsfateverb first argument should be countable") |
|
505 | 516 | raise error.ParseError(errmsg) |
|
506 | 517 | |
|
507 | 518 | @templatefunc('relpath(path)', requires={'repo'}) |
|
508 | 519 | def relpath(context, mapping, args): |
|
509 | 520 | """Convert a repository-absolute path into a filesystem path relative to |
|
510 | 521 | the current working directory.""" |
|
511 | 522 | if len(args) != 1: |
|
512 | 523 | # i18n: "relpath" is a keyword |
|
513 | 524 | raise error.ParseError(_("relpath expects one argument")) |
|
514 | 525 | |
|
515 | 526 | repo = context.resource(mapping, 'repo') |
|
516 | 527 | path = evalstring(context, mapping, args[0]) |
|
517 | 528 | return repo.pathto(path) |
|
518 | 529 | |
|
519 | 530 | @templatefunc('revset(query[, formatargs...])', requires={'repo', 'cache'}) |
|
520 | 531 | def revset(context, mapping, args): |
|
521 | 532 | """Execute a revision set query. See |
|
522 | 533 | :hg:`help revset`.""" |
|
523 | 534 | if not len(args) > 0: |
|
524 | 535 | # i18n: "revset" is a keyword |
|
525 | 536 | raise error.ParseError(_("revset expects one or more arguments")) |
|
526 | 537 | |
|
527 | 538 | raw = evalstring(context, mapping, args[0]) |
|
528 | 539 | repo = context.resource(mapping, 'repo') |
|
529 | 540 | |
|
530 | 541 | def query(expr): |
|
531 | 542 | m = revsetmod.match(repo.ui, expr, lookup=revsetmod.lookupfn(repo)) |
|
532 | 543 | return m(repo) |
|
533 | 544 | |
|
534 | 545 | if len(args) > 1: |
|
535 | 546 | formatargs = [evalfuncarg(context, mapping, a) for a in args[1:]] |
|
536 | 547 | revs = query(revsetlang.formatspec(raw, *formatargs)) |
|
537 | 548 | revs = list(revs) |
|
538 | 549 | else: |
|
539 | 550 | cache = context.resource(mapping, 'cache') |
|
540 | 551 | revsetcache = cache.setdefault("revsetcache", {}) |
|
541 | 552 | if raw in revsetcache: |
|
542 | 553 | revs = revsetcache[raw] |
|
543 | 554 | else: |
|
544 | 555 | revs = query(raw) |
|
545 | 556 | revs = list(revs) |
|
546 | 557 | revsetcache[raw] = revs |
|
547 | 558 | return templatekw.showrevslist(context, mapping, "revision", revs) |
|
548 | 559 | |
|
549 | 560 | @templatefunc('rstdoc(text, style)') |
|
550 | 561 | def rstdoc(context, mapping, args): |
|
551 | 562 | """Format reStructuredText.""" |
|
552 | 563 | if len(args) != 2: |
|
553 | 564 | # i18n: "rstdoc" is a keyword |
|
554 | 565 | raise error.ParseError(_("rstdoc expects two arguments")) |
|
555 | 566 | |
|
556 | 567 | text = evalstring(context, mapping, args[0]) |
|
557 | 568 | style = evalstring(context, mapping, args[1]) |
|
558 | 569 | |
|
559 | 570 | return minirst.format(text, style=style, keep=['verbose'])[0] |
|
560 | 571 | |
|
561 | 572 | @templatefunc('separate(sep, args...)', argspec='sep *args') |
|
562 | 573 | def separate(context, mapping, args): |
|
563 | 574 | """Add a separator between non-empty arguments.""" |
|
564 | 575 | if 'sep' not in args: |
|
565 | 576 | # i18n: "separate" is a keyword |
|
566 | 577 | raise error.ParseError(_("separate expects at least one argument")) |
|
567 | 578 | |
|
568 | 579 | sep = evalstring(context, mapping, args['sep']) |
|
569 | 580 | first = True |
|
570 | 581 | for arg in args['args']: |
|
571 | 582 | argstr = evalstring(context, mapping, arg) |
|
572 | 583 | if not argstr: |
|
573 | 584 | continue |
|
574 | 585 | if first: |
|
575 | 586 | first = False |
|
576 | 587 | else: |
|
577 | 588 | yield sep |
|
578 | 589 | yield argstr |
|
579 | 590 | |
|
580 | 591 | @templatefunc('shortest(node, minlength=4)', requires={'repo'}) |
|
581 | 592 | def shortest(context, mapping, args): |
|
582 | 593 | """Obtain the shortest representation of |
|
583 | 594 | a node.""" |
|
584 | 595 | if not (1 <= len(args) <= 2): |
|
585 | 596 | # i18n: "shortest" is a keyword |
|
586 | 597 | raise error.ParseError(_("shortest() expects one or two arguments")) |
|
587 | 598 | |
|
588 | 599 | hexnode = evalstring(context, mapping, args[0]) |
|
589 | 600 | |
|
590 | 601 | minlength = 4 |
|
591 | 602 | if len(args) > 1: |
|
592 | 603 | minlength = evalinteger(context, mapping, args[1], |
|
593 | 604 | # i18n: "shortest" is a keyword |
|
594 | 605 | _("shortest() expects an integer minlength")) |
|
595 | 606 | |
|
596 | 607 | repo = context.resource(mapping, 'repo') |
|
597 | 608 | if len(hexnode) > 40: |
|
598 | 609 | return hexnode |
|
599 | 610 | elif len(hexnode) == 40: |
|
600 | 611 | try: |
|
601 | 612 | node = bin(hexnode) |
|
602 | 613 | except TypeError: |
|
603 | 614 | return hexnode |
|
604 | 615 | else: |
|
605 | 616 | try: |
|
606 | 617 | node = scmutil.resolvehexnodeidprefix(repo, hexnode) |
|
607 | 618 | except error.WdirUnsupported: |
|
608 | 619 | node = wdirid |
|
609 | 620 | except error.LookupError: |
|
610 | 621 | return hexnode |
|
611 | 622 | if not node: |
|
612 | 623 | return hexnode |
|
613 | 624 | try: |
|
614 | 625 | return scmutil.shortesthexnodeidprefix(repo, node, minlength) |
|
615 | 626 | except error.RepoLookupError: |
|
616 | 627 | return hexnode |
|
617 | 628 | |
|
618 | 629 | @templatefunc('strip(text[, chars])') |
|
619 | 630 | def strip(context, mapping, args): |
|
620 | 631 | """Strip characters from a string. By default, |
|
621 | 632 | strips all leading and trailing whitespace.""" |
|
622 | 633 | if not (1 <= len(args) <= 2): |
|
623 | 634 | # i18n: "strip" is a keyword |
|
624 | 635 | raise error.ParseError(_("strip expects one or two arguments")) |
|
625 | 636 | |
|
626 | 637 | text = evalstring(context, mapping, args[0]) |
|
627 | 638 | if len(args) == 2: |
|
628 | 639 | chars = evalstring(context, mapping, args[1]) |
|
629 | 640 | return text.strip(chars) |
|
630 | 641 | return text.strip() |
|
631 | 642 | |
|
632 | 643 | @templatefunc('sub(pattern, replacement, expression)') |
|
633 | 644 | def sub(context, mapping, args): |
|
634 | 645 | """Perform text substitution |
|
635 | 646 | using regular expressions.""" |
|
636 | 647 | if len(args) != 3: |
|
637 | 648 | # i18n: "sub" is a keyword |
|
638 | 649 | raise error.ParseError(_("sub expects three arguments")) |
|
639 | 650 | |
|
640 | 651 | pat = evalstring(context, mapping, args[0]) |
|
641 | 652 | rpl = evalstring(context, mapping, args[1]) |
|
642 | 653 | src = evalstring(context, mapping, args[2]) |
|
643 | 654 | try: |
|
644 | 655 | patre = re.compile(pat) |
|
645 | 656 | except re.error: |
|
646 | 657 | # i18n: "sub" is a keyword |
|
647 | 658 | raise error.ParseError(_("sub got an invalid pattern: %s") % pat) |
|
648 | 659 | try: |
|
649 | 660 | yield patre.sub(rpl, src) |
|
650 | 661 | except re.error: |
|
651 | 662 | # i18n: "sub" is a keyword |
|
652 | 663 | raise error.ParseError(_("sub got an invalid replacement: %s") % rpl) |
|
653 | 664 | |
|
654 | 665 | @templatefunc('startswith(pattern, text)') |
|
655 | 666 | def startswith(context, mapping, args): |
|
656 | 667 | """Returns the value from the "text" argument |
|
657 | 668 | if it begins with the content from the "pattern" argument.""" |
|
658 | 669 | if len(args) != 2: |
|
659 | 670 | # i18n: "startswith" is a keyword |
|
660 | 671 | raise error.ParseError(_("startswith expects two arguments")) |
|
661 | 672 | |
|
662 | 673 | patn = evalstring(context, mapping, args[0]) |
|
663 | 674 | text = evalstring(context, mapping, args[1]) |
|
664 | 675 | if text.startswith(patn): |
|
665 | 676 | return text |
|
666 | 677 | return '' |
|
667 | 678 | |
|
668 | 679 | @templatefunc('word(number, text[, separator])') |
|
669 | 680 | def word(context, mapping, args): |
|
670 | 681 | """Return the nth word from a string.""" |
|
671 | 682 | if not (2 <= len(args) <= 3): |
|
672 | 683 | # i18n: "word" is a keyword |
|
673 | 684 | raise error.ParseError(_("word expects two or three arguments, got %d") |
|
674 | 685 | % len(args)) |
|
675 | 686 | |
|
676 | 687 | num = evalinteger(context, mapping, args[0], |
|
677 | 688 | # i18n: "word" is a keyword |
|
678 | 689 | _("word expects an integer index")) |
|
679 | 690 | text = evalstring(context, mapping, args[1]) |
|
680 | 691 | if len(args) == 3: |
|
681 | 692 | splitter = evalstring(context, mapping, args[2]) |
|
682 | 693 | else: |
|
683 | 694 | splitter = None |
|
684 | 695 | |
|
685 | 696 | tokens = text.split(splitter) |
|
686 | 697 | if num >= len(tokens) or num < -len(tokens): |
|
687 | 698 | return '' |
|
688 | 699 | else: |
|
689 | 700 | return tokens[num] |
|
690 | 701 | |
|
691 | 702 | def loadfunction(ui, extname, registrarobj): |
|
692 | 703 | """Load template function from specified registrarobj |
|
693 | 704 | """ |
|
694 | 705 | for name, func in registrarobj._table.iteritems(): |
|
695 | 706 | funcs[name] = func |
|
696 | 707 | |
|
697 | 708 | # tell hggettext to extract docstrings from these functions: |
|
698 | 709 | i18nfunctions = funcs.values() |
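The template functions in this file all follow the same shape: a function registered through the templatefunc decorator receives (context, mapping, args), validates the argument count up front, raises ParseError with a translated message when the count is wrong, and only then evaluates each argument with the eval* helpers. The standalone sketch below is my own simplification, not Mercurial code: the decorator, the ParseError class, and the plain-string arguments are stand-ins for the real registrar, error type, and compiled template expressions, with word() used as the model.

# Simplified stand-ins; the real pieces live in mercurial.registrar and
# mercurial.error, and arguments are compiled template expressions rather
# than plain strings.
class ParseError(Exception):
    pass

funcs = {}

def templatefunc(decl):
    """Register a function in ``funcs`` under the name part of ``decl``."""
    name = decl.split('(')[0]
    def register(func):
        funcs[name] = func
        return func
    return register

@templatefunc('word(number, text[, separator])')
def word(args):
    # Validate the argument count first, exactly like the functions above.
    if not (2 <= len(args) <= 3):
        raise ParseError('word expects two or three arguments, got %d'
                         % len(args))
    num = int(args[0])           # evalinteger() in the real implementation
    text = args[1]               # evalstring() in the real implementation
    splitter = args[2] if len(args) == 3 else None
    tokens = text.split(splitter)
    if num >= len(tokens) or num < -len(tokens):
        return ''
    return tokens[num]

print(funcs['word'](['1', 'foo bar baz']))   # -> bar
print(funcs['word'](['5', 'foo bar baz']))   # -> empty string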
@@ -1,887 +1,931 b'' | |||
|
1 | 1 | # templateutil.py - utility for template evaluation |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import abc |
|
11 | 11 | import types |
|
12 | 12 | |
|
13 | 13 | from .i18n import _ |
|
14 | 14 | from . import ( |
|
15 | 15 | error, |
|
16 | 16 | pycompat, |
|
17 | 17 | util, |
|
18 | 18 | ) |
|
19 | 19 | from .utils import ( |
|
20 | 20 | dateutil, |
|
21 | 21 | stringutil, |
|
22 | 22 | ) |
|
23 | 23 | |
|
24 | 24 | class ResourceUnavailable(error.Abort): |
|
25 | 25 | pass |
|
26 | 26 | |
|
27 | 27 | class TemplateNotFound(error.Abort): |
|
28 | 28 | pass |
|
29 | 29 | |
|
30 | 30 | class wrapped(object): |
|
31 | 31 | """Object requiring extra conversion prior to displaying or processing |
|
32 | 32 | as value |
|
33 | 33 | |
|
34 | 34 | Use unwrapvalue() or unwrapastype() to obtain the inner object. |
|
35 | 35 | """ |
|
36 | 36 | |
|
37 | 37 | __metaclass__ = abc.ABCMeta |
|
38 | 38 | |
|
39 | 39 | @abc.abstractmethod |
|
40 | 40 | def contains(self, context, mapping, item): |
|
41 | 41 | """Test if the specified item is in self |
|
42 | 42 | |
|
43 | 43 | The item argument may be a wrapped object. |
|
44 | 44 | """ |
|
45 | 45 | |
|
46 | 46 | @abc.abstractmethod |
|
47 | 47 | def getmember(self, context, mapping, key): |
|
48 | 48 | """Return a member item for the specified key |
|
49 | 49 | |
|
50 | 50 | The key argument may be a wrapped object. |
|
51 | 51 | A returned object may be either a wrapped object or a pure value |
|
52 | 52 | depending on the self type. |
|
53 | 53 | """ |
|
54 | 54 | |
|
55 | 55 | @abc.abstractmethod |
|
56 | 56 | def getmin(self, context, mapping): |
|
57 | 57 | """Return the smallest item, which may be either a wrapped or a pure |
|
58 | 58 | value depending on the self type""" |
|
59 | 59 | |
|
60 | 60 | @abc.abstractmethod |
|
61 | 61 | def getmax(self, context, mapping): |
|
62 | 62 | """Return the largest item, which may be either a wrapped or a pure |
|
63 | 63 | value depending on the self type""" |
|
64 | 64 | |
|
65 | 65 | @abc.abstractmethod |
|
66 | def filter(self, context, mapping, select): | |
|
67 | """Return new container of the same type which includes only the | |
|
68 | selected elements | |
|
69 | ||
|
70 | select() takes each item as a wrapped object and returns True/False. | |
|
71 | """ | |
|
72 | ||
|
73 | @abc.abstractmethod | |
|
66 | 74 | def itermaps(self, context): |
|
67 | 75 | """Yield each template mapping""" |
|
68 | 76 | |
|
69 | 77 | @abc.abstractmethod |
|
70 | 78 | def join(self, context, mapping, sep): |
|
71 | 79 | """Join items with the separator; Returns a bytes or (possibly nested) |
|
72 | 80 | generator of bytes |
|
73 | 81 | |
|
74 | 82 | A pre-configured template may be rendered per item if this container |
|
75 | 83 | holds unprintable items. |
|
76 | 84 | """ |
|
77 | 85 | |
|
78 | 86 | @abc.abstractmethod |
|
79 | 87 | def show(self, context, mapping): |
|
80 | 88 | """Return a bytes or (possibly nested) generator of bytes representing |
|
81 | 89 | the underlying object |
|
82 | 90 | |
|
83 | 91 | A pre-configured template may be rendered if the underlying object is |
|
84 | 92 | not printable. |
|
85 | 93 | """ |
|
86 | 94 | |
|
87 | 95 | @abc.abstractmethod |
|
88 | 96 | def tobool(self, context, mapping): |
|
89 | 97 | """Return a boolean representation of the inner value""" |
|
90 | 98 | |
|
91 | 99 | @abc.abstractmethod |
|
92 | 100 | def tovalue(self, context, mapping): |
|
93 | 101 | """Move the inner value object out or create a value representation |
|
94 | 102 | |
|
95 | 103 | A returned value must be serializable by templaterfilters.json(). |
|
96 | 104 | """ |
|
97 | 105 | |
|
98 | 106 | class mappable(object): |
|
99 | 107 | """Object which can be converted to a single template mapping""" |
|
100 | 108 | |
|
101 | 109 | def itermaps(self, context): |
|
102 | 110 | yield self.tomap(context) |
|
103 | 111 | |
|
104 | 112 | @abc.abstractmethod |
|
105 | 113 | def tomap(self, context): |
|
106 | 114 | """Create a single template mapping representing this""" |
|
107 | 115 | |
|
108 | 116 | class wrappedbytes(wrapped): |
|
109 | 117 | """Wrapper for byte string""" |
|
110 | 118 | |
|
111 | 119 | def __init__(self, value): |
|
112 | 120 | self._value = value |
|
113 | 121 | |
|
114 | 122 | def contains(self, context, mapping, item): |
|
115 | 123 | item = stringify(context, mapping, item) |
|
116 | 124 | return item in self._value |
|
117 | 125 | |
|
118 | 126 | def getmember(self, context, mapping, key): |
|
119 | 127 | raise error.ParseError(_('%r is not a dictionary') |
|
120 | 128 | % pycompat.bytestr(self._value)) |
|
121 | 129 | |
|
122 | 130 | def getmin(self, context, mapping): |
|
123 | 131 | return self._getby(context, mapping, min) |
|
124 | 132 | |
|
125 | 133 | def getmax(self, context, mapping): |
|
126 | 134 | return self._getby(context, mapping, max) |
|
127 | 135 | |
|
128 | 136 | def _getby(self, context, mapping, func): |
|
129 | 137 | if not self._value: |
|
130 | 138 | raise error.ParseError(_('empty string')) |
|
131 | 139 | return func(pycompat.iterbytestr(self._value)) |
|
132 | 140 | |
|
141 | def filter(self, context, mapping, select): | |
|
142 | raise error.ParseError(_('%r is not filterable') | |
|
143 | % pycompat.bytestr(self._value)) | |
|
144 | ||
|
133 | 145 | def itermaps(self, context): |
|
134 | 146 | raise error.ParseError(_('%r is not iterable of mappings') |
|
135 | 147 | % pycompat.bytestr(self._value)) |
|
136 | 148 | |
|
137 | 149 | def join(self, context, mapping, sep): |
|
138 | 150 | return joinitems(pycompat.iterbytestr(self._value), sep) |
|
139 | 151 | |
|
140 | 152 | def show(self, context, mapping): |
|
141 | 153 | return self._value |
|
142 | 154 | |
|
143 | 155 | def tobool(self, context, mapping): |
|
144 | 156 | return bool(self._value) |
|
145 | 157 | |
|
146 | 158 | def tovalue(self, context, mapping): |
|
147 | 159 | return self._value |
|
148 | 160 | |
|
149 | 161 | class wrappedvalue(wrapped): |
|
150 | 162 | """Generic wrapper for pure non-list/dict/bytes value""" |
|
151 | 163 | |
|
152 | 164 | def __init__(self, value): |
|
153 | 165 | self._value = value |
|
154 | 166 | |
|
155 | 167 | def contains(self, context, mapping, item): |
|
156 | 168 | raise error.ParseError(_("%r is not iterable") % self._value) |
|
157 | 169 | |
|
158 | 170 | def getmember(self, context, mapping, key): |
|
159 | 171 | raise error.ParseError(_('%r is not a dictionary') % self._value) |
|
160 | 172 | |
|
161 | 173 | def getmin(self, context, mapping): |
|
162 | 174 | raise error.ParseError(_("%r is not iterable") % self._value) |
|
163 | 175 | |
|
164 | 176 | def getmax(self, context, mapping): |
|
165 | 177 | raise error.ParseError(_("%r is not iterable") % self._value) |
|
166 | 178 | |
|
179 | def filter(self, context, mapping, select): | |
|
180 | raise error.ParseError(_("%r is not iterable") % self._value) | |
|
181 | ||
|
167 | 182 | def itermaps(self, context): |
|
168 | 183 | raise error.ParseError(_('%r is not iterable of mappings') |
|
169 | 184 | % self._value) |
|
170 | 185 | |
|
171 | 186 | def join(self, context, mapping, sep): |
|
172 | 187 | raise error.ParseError(_('%r is not iterable') % self._value) |
|
173 | 188 | |
|
174 | 189 | def show(self, context, mapping): |
|
175 | 190 | if self._value is None: |
|
176 | 191 | return b'' |
|
177 | 192 | return pycompat.bytestr(self._value) |
|
178 | 193 | |
|
179 | 194 | def tobool(self, context, mapping): |
|
180 | 195 | if self._value is None: |
|
181 | 196 | return False |
|
182 | 197 | if isinstance(self._value, bool): |
|
183 | 198 | return self._value |
|
184 | 199 | # otherwise evaluate as string, which means 0 is True |
|
185 | 200 | return bool(pycompat.bytestr(self._value)) |
|
186 | 201 | |
|
187 | 202 | def tovalue(self, context, mapping): |
|
188 | 203 | return self._value |
|
189 | 204 | |
|
190 | 205 | class date(mappable, wrapped): |
|
191 | 206 | """Wrapper for date tuple""" |
|
192 | 207 | |
|
193 | 208 | def __init__(self, value, showfmt='%d %d'): |
|
194 | 209 | # value may be (float, int), but public interface shouldn't support |
|
195 | 210 | # floating-point timestamp |
|
196 | 211 | self._unixtime, self._tzoffset = map(int, value) |
|
197 | 212 | self._showfmt = showfmt |
|
198 | 213 | |
|
199 | 214 | def contains(self, context, mapping, item): |
|
200 | 215 | raise error.ParseError(_('date is not iterable')) |
|
201 | 216 | |
|
202 | 217 | def getmember(self, context, mapping, key): |
|
203 | 218 | raise error.ParseError(_('date is not a dictionary')) |
|
204 | 219 | |
|
205 | 220 | def getmin(self, context, mapping): |
|
206 | 221 | raise error.ParseError(_('date is not iterable')) |
|
207 | 222 | |
|
208 | 223 | def getmax(self, context, mapping): |
|
209 | 224 | raise error.ParseError(_('date is not iterable')) |
|
210 | 225 | |
|
226 | def filter(self, context, mapping, select): | |
|
227 | raise error.ParseError(_('date is not iterable')) | |
|
228 | ||
|
211 | 229 | def join(self, context, mapping, sep): |
|
212 | 230 | raise error.ParseError(_("date is not iterable")) |
|
213 | 231 | |
|
214 | 232 | def show(self, context, mapping): |
|
215 | 233 | return self._showfmt % (self._unixtime, self._tzoffset) |
|
216 | 234 | |
|
217 | 235 | def tomap(self, context): |
|
218 | 236 | return {'unixtime': self._unixtime, 'tzoffset': self._tzoffset} |
|
219 | 237 | |
|
220 | 238 | def tobool(self, context, mapping): |
|
221 | 239 | return True |
|
222 | 240 | |
|
223 | 241 | def tovalue(self, context, mapping): |
|
224 | 242 | return (self._unixtime, self._tzoffset) |
|
225 | 243 | |
|
226 | 244 | class hybrid(wrapped): |
|
227 | 245 | """Wrapper for list or dict to support legacy template |
|
228 | 246 | |
|
229 | 247 | This class allows us to handle both: |
|
230 | 248 | - "{files}" (legacy command-line-specific list hack) and |
|
231 | 249 | - "{files % '{file}\n'}" (hgweb-style with inlining and function support) |
|
232 | 250 | and to access raw values: |
|
233 | 251 | - "{ifcontains(file, files, ...)}", "{ifcontains(key, extras, ...)}" |
|
234 | 252 | - "{get(extras, key)}" |
|
235 | 253 | - "{files|json}" |
|
236 | 254 | """ |
|
237 | 255 | |
|
238 | 256 | def __init__(self, gen, values, makemap, joinfmt, keytype=None): |
|
239 | 257 | self._gen = gen # generator or function returning generator |
|
240 | 258 | self._values = values |
|
241 | 259 | self._makemap = makemap |
|
242 | 260 | self._joinfmt = joinfmt |
|
243 | 261 | self._keytype = keytype # hint for 'x in y' where type(x) is unresolved |
|
244 | 262 | |
|
245 | 263 | def contains(self, context, mapping, item): |
|
246 | 264 | item = unwrapastype(context, mapping, item, self._keytype) |
|
247 | 265 | return item in self._values |
|
248 | 266 | |
|
249 | 267 | def getmember(self, context, mapping, key): |
|
250 | 268 | # TODO: maybe split hybrid list/dict types? |
|
251 | 269 | if not util.safehasattr(self._values, 'get'): |
|
252 | 270 | raise error.ParseError(_('not a dictionary')) |
|
253 | 271 | key = unwrapastype(context, mapping, key, self._keytype) |
|
254 | 272 | return self._wrapvalue(key, self._values.get(key)) |
|
255 | 273 | |
|
256 | 274 | def getmin(self, context, mapping): |
|
257 | 275 | return self._getby(context, mapping, min) |
|
258 | 276 | |
|
259 | 277 | def getmax(self, context, mapping): |
|
260 | 278 | return self._getby(context, mapping, max) |
|
261 | 279 | |
|
262 | 280 | def _getby(self, context, mapping, func): |
|
263 | 281 | if not self._values: |
|
264 | 282 | raise error.ParseError(_('empty sequence')) |
|
265 | 283 | val = func(self._values) |
|
266 | 284 | return self._wrapvalue(val, val) |
|
267 | 285 | |
|
268 | 286 | def _wrapvalue(self, key, val): |
|
269 | 287 | if val is None: |
|
270 | 288 | return |
|
271 | 289 | if util.safehasattr(val, '_makemap'): |
|
272 | 290 | # a nested hybrid list/dict, which has its own way of map operation |
|
273 | 291 | return val |
|
274 | 292 | return hybriditem(None, key, val, self._makemap) |
|
275 | 293 | |
|
294 | def filter(self, context, mapping, select): | |
|
295 | if util.safehasattr(self._values, 'get'): | |
|
296 | values = {k: v for k, v in self._values.iteritems() | |
|
297 | if select(self._wrapvalue(k, v))} | |
|
298 | else: | |
|
299 | values = [v for v in self._values if select(self._wrapvalue(v, v))] | |
|
300 | return hybrid(None, values, self._makemap, self._joinfmt, self._keytype) | |
|
301 | ||
|
276 | 302 | def itermaps(self, context): |
|
277 | 303 | makemap = self._makemap |
|
278 | 304 | for x in self._values: |
|
279 | 305 | yield makemap(x) |
|
280 | 306 | |
|
281 | 307 | def join(self, context, mapping, sep): |
|
282 | 308 | # TODO: switch gen to (context, mapping) API? |
|
283 | 309 | return joinitems((self._joinfmt(x) for x in self._values), sep) |
|
284 | 310 | |
|
285 | 311 | def show(self, context, mapping): |
|
286 | 312 | # TODO: switch gen to (context, mapping) API? |
|
287 | 313 | gen = self._gen |
|
288 | 314 | if gen is None: |
|
289 | 315 | return self.join(context, mapping, ' ') |
|
290 | 316 | if callable(gen): |
|
291 | 317 | return gen() |
|
292 | 318 | return gen |
|
293 | 319 | |
|
294 | 320 | def tobool(self, context, mapping): |
|
295 | 321 | return bool(self._values) |
|
296 | 322 | |
|
297 | 323 | def tovalue(self, context, mapping): |
|
298 | 324 | # TODO: make it non-recursive for trivial lists/dicts |
|
299 | 325 | xs = self._values |
|
300 | 326 | if util.safehasattr(xs, 'get'): |
|
301 | 327 | return {k: unwrapvalue(context, mapping, v) |
|
302 | 328 | for k, v in xs.iteritems()} |
|
303 | 329 | return [unwrapvalue(context, mapping, x) for x in xs] |
|
304 | 330 | |
|
305 | 331 | class hybriditem(mappable, wrapped): |
|
306 | 332 | """Wrapper for non-list/dict object to support map operation |
|
307 | 333 | |
|
308 | 334 | This class allows us to handle both: |
|
309 | 335 | - "{manifest}" |
|
310 | 336 | - "{manifest % '{rev}:{node}'}" |
|
311 | 337 | - "{manifest.rev}" |
|
312 | 338 | """ |
|
313 | 339 | |
|
314 | 340 | def __init__(self, gen, key, value, makemap): |
|
315 | 341 | self._gen = gen # generator or function returning generator |
|
316 | 342 | self._key = key |
|
317 | 343 | self._value = value # may be generator of strings |
|
318 | 344 | self._makemap = makemap |
|
319 | 345 | |
|
320 | 346 | def tomap(self, context): |
|
321 | 347 | return self._makemap(self._key) |
|
322 | 348 | |
|
323 | 349 | def contains(self, context, mapping, item): |
|
324 | 350 | w = makewrapped(context, mapping, self._value) |
|
325 | 351 | return w.contains(context, mapping, item) |
|
326 | 352 | |
|
327 | 353 | def getmember(self, context, mapping, key): |
|
328 | 354 | w = makewrapped(context, mapping, self._value) |
|
329 | 355 | return w.getmember(context, mapping, key) |
|
330 | 356 | |
|
331 | 357 | def getmin(self, context, mapping): |
|
332 | 358 | w = makewrapped(context, mapping, self._value) |
|
333 | 359 | return w.getmin(context, mapping) |
|
334 | 360 | |
|
335 | 361 | def getmax(self, context, mapping): |
|
336 | 362 | w = makewrapped(context, mapping, self._value) |
|
337 | 363 | return w.getmax(context, mapping) |
|
338 | 364 | |
|
365 | def filter(self, context, mapping, select): | |
|
366 | w = makewrapped(context, mapping, self._value) | |
|
367 | return w.filter(context, mapping, select) | |
|
368 | ||
|
339 | 369 | def join(self, context, mapping, sep): |
|
340 | 370 | w = makewrapped(context, mapping, self._value) |
|
341 | 371 | return w.join(context, mapping, sep) |
|
342 | 372 | |
|
343 | 373 | def show(self, context, mapping): |
|
344 | 374 | # TODO: switch gen to (context, mapping) API? |
|
345 | 375 | gen = self._gen |
|
346 | 376 | if gen is None: |
|
347 | 377 | return pycompat.bytestr(self._value) |
|
348 | 378 | if callable(gen): |
|
349 | 379 | return gen() |
|
350 | 380 | return gen |
|
351 | 381 | |
|
352 | 382 | def tobool(self, context, mapping): |
|
353 | 383 | w = makewrapped(context, mapping, self._value) |
|
354 | 384 | return w.tobool(context, mapping) |
|
355 | 385 | |
|
356 | 386 | def tovalue(self, context, mapping): |
|
357 | 387 | return _unthunk(context, mapping, self._value) |
|
358 | 388 | |
|
359 | 389 | class _mappingsequence(wrapped): |
|
360 | 390 | """Wrapper for sequence of template mappings |
|
361 | 391 | |
|
362 | 392 | This represents an inner template structure (i.e. a list of dicts), |
|
363 | 393 | which can also be rendered by the specified named/literal template. |
|
364 | 394 | |
|
365 | 395 | Template mappings may be nested. |
|
366 | 396 | """ |
|
367 | 397 | |
|
368 | 398 | def __init__(self, name=None, tmpl=None, sep=''): |
|
369 | 399 | if name is not None and tmpl is not None: |
|
370 | 400 | raise error.ProgrammingError('name and tmpl are mutually exclusive') |
|
371 | 401 | self._name = name |
|
372 | 402 | self._tmpl = tmpl |
|
373 | 403 | self._defaultsep = sep |
|
374 | 404 | |
|
375 | 405 | def contains(self, context, mapping, item): |
|
376 | 406 | raise error.ParseError(_('not comparable')) |
|
377 | 407 | |
|
378 | 408 | def getmember(self, context, mapping, key): |
|
379 | 409 | raise error.ParseError(_('not a dictionary')) |
|
380 | 410 | |
|
381 | 411 | def getmin(self, context, mapping): |
|
382 | 412 | raise error.ParseError(_('not comparable')) |
|
383 | 413 | |
|
384 | 414 | def getmax(self, context, mapping): |
|
385 | 415 | raise error.ParseError(_('not comparable')) |
|
386 | 416 | |
|
417 | def filter(self, context, mapping, select): | |
|
418 | raise error.ParseError(_('not filterable without template')) | |
|
419 | ||
|
387 | 420 | def join(self, context, mapping, sep): |
|
388 | 421 | mapsiter = _iteroverlaymaps(context, mapping, self.itermaps(context)) |
|
389 | 422 | if self._name: |
|
390 | 423 | itemiter = (context.process(self._name, m) for m in mapsiter) |
|
391 | 424 | elif self._tmpl: |
|
392 | 425 | itemiter = (context.expand(self._tmpl, m) for m in mapsiter) |
|
393 | 426 | else: |
|
394 | 427 | raise error.ParseError(_('not displayable without template')) |
|
395 | 428 | return joinitems(itemiter, sep) |
|
396 | 429 | |
|
397 | 430 | def show(self, context, mapping): |
|
398 | 431 | return self.join(context, mapping, self._defaultsep) |
|
399 | 432 | |
|
400 | 433 | def tovalue(self, context, mapping): |
|
401 | 434 | knownres = context.knownresourcekeys() |
|
402 | 435 | items = [] |
|
403 | 436 | for nm in self.itermaps(context): |
|
404 | 437 | # drop internal resources (recursively) which shouldn't be displayed |
|
405 | 438 | lm = context.overlaymap(mapping, nm) |
|
406 | 439 | items.append({k: unwrapvalue(context, lm, v) |
|
407 | 440 | for k, v in nm.iteritems() if k not in knownres}) |
|
408 | 441 | return items |
|
409 | 442 | |
|
410 | 443 | class mappinggenerator(_mappingsequence): |
|
411 | 444 | """Wrapper for generator of template mappings |
|
412 | 445 | |
|
413 | 446 | The function ``make(context, *args)`` should return a generator of |
|
414 | 447 | mapping dicts. |
|
415 | 448 | """ |
|
416 | 449 | |
|
417 | 450 | def __init__(self, make, args=(), name=None, tmpl=None, sep=''): |
|
418 | 451 | super(mappinggenerator, self).__init__(name, tmpl, sep) |
|
419 | 452 | self._make = make |
|
420 | 453 | self._args = args |
|
421 | 454 | |
|
422 | 455 | def itermaps(self, context): |
|
423 | 456 | return self._make(context, *self._args) |
|
424 | 457 | |
|
425 | 458 | def tobool(self, context, mapping): |
|
426 | 459 | return _nonempty(self.itermaps(context)) |
|
427 | 460 | |
|
428 | 461 | class mappinglist(_mappingsequence): |
|
429 | 462 | """Wrapper for list of template mappings""" |
|
430 | 463 | |
|
431 | 464 | def __init__(self, mappings, name=None, tmpl=None, sep=''): |
|
432 | 465 | super(mappinglist, self).__init__(name, tmpl, sep) |
|
433 | 466 | self._mappings = mappings |
|
434 | 467 | |
|
435 | 468 | def itermaps(self, context): |
|
436 | 469 | return iter(self._mappings) |
|
437 | 470 | |
|
438 | 471 | def tobool(self, context, mapping): |
|
439 | 472 | return bool(self._mappings) |
|
440 | 473 | |
|
441 | 474 | class mappedgenerator(wrapped): |
|
442 | 475 | """Wrapper for generator of strings which acts as a list |
|
443 | 476 | |
|
444 | 477 | The function ``make(context, *args)`` should return a generator of |
|
445 | 478 | byte strings, or a generator of (possibly nested) generators of byte |
|
446 | 479 | strings (i.e. a generator for a list of byte strings.) |
|
447 | 480 | """ |
|
448 | 481 | |
|
449 | 482 | def __init__(self, make, args=()): |
|
450 | 483 | self._make = make |
|
451 | 484 | self._args = args |
|
452 | 485 | |
|
453 | 486 | def contains(self, context, mapping, item): |
|
454 | 487 | item = stringify(context, mapping, item) |
|
455 | 488 | return item in self.tovalue(context, mapping) |
|
456 | 489 | |
|
457 | 490 | def _gen(self, context): |
|
458 | 491 | return self._make(context, *self._args) |
|
459 | 492 | |
|
460 | 493 | def getmember(self, context, mapping, key): |
|
461 | 494 | raise error.ParseError(_('not a dictionary')) |
|
462 | 495 | |
|
463 | 496 | def getmin(self, context, mapping): |
|
464 | 497 | return self._getby(context, mapping, min) |
|
465 | 498 | |
|
466 | 499 | def getmax(self, context, mapping): |
|
467 | 500 | return self._getby(context, mapping, max) |
|
468 | 501 | |
|
469 | 502 | def _getby(self, context, mapping, func): |
|
470 | 503 | xs = self.tovalue(context, mapping) |
|
471 | 504 | if not xs: |
|
472 | 505 | raise error.ParseError(_('empty sequence')) |
|
473 | 506 | return func(xs) |
|
474 | 507 | |
|
508 | @staticmethod | |
|
509 | def _filteredgen(context, mapping, make, args, select): | |
|
510 | for x in make(context, *args): | |
|
511 | s = stringify(context, mapping, x) | |
|
512 | if select(wrappedbytes(s)): | |
|
513 | yield s | |
|
514 | ||
|
515 | def filter(self, context, mapping, select): | |
|
516 | args = (mapping, self._make, self._args, select) | |
|
517 | return mappedgenerator(self._filteredgen, args) | |
|
518 | ||
|
475 | 519 | def itermaps(self, context): |
|
476 | 520 | raise error.ParseError(_('list of strings is not mappable')) |
|
477 | 521 | |
|
478 | 522 | def join(self, context, mapping, sep): |
|
479 | 523 | return joinitems(self._gen(context), sep) |
|
480 | 524 | |
|
481 | 525 | def show(self, context, mapping): |
|
482 | 526 | return self.join(context, mapping, '') |
|
483 | 527 | |
|
484 | 528 | def tobool(self, context, mapping): |
|
485 | 529 | return _nonempty(self._gen(context)) |
|
486 | 530 | |
|
487 | 531 | def tovalue(self, context, mapping): |
|
488 | 532 | return [stringify(context, mapping, x) for x in self._gen(context)] |
|
489 | 533 | |
|
490 | 534 | def hybriddict(data, key='key', value='value', fmt=None, gen=None): |
|
491 | 535 | """Wrap data to support both dict-like and string-like operations""" |
|
492 | 536 | prefmt = pycompat.identity |
|
493 | 537 | if fmt is None: |
|
494 | 538 | fmt = '%s=%s' |
|
495 | 539 | prefmt = pycompat.bytestr |
|
496 | 540 | return hybrid(gen, data, lambda k: {key: k, value: data[k]}, |
|
497 | 541 | lambda k: fmt % (prefmt(k), prefmt(data[k]))) |
|
498 | 542 | |
|
499 | 543 | def hybridlist(data, name, fmt=None, gen=None): |
|
500 | 544 | """Wrap data to support both list-like and string-like operations""" |
|
501 | 545 | prefmt = pycompat.identity |
|
502 | 546 | if fmt is None: |
|
503 | 547 | fmt = '%s' |
|
504 | 548 | prefmt = pycompat.bytestr |
|
505 | 549 | return hybrid(gen, data, lambda x: {name: x}, lambda x: fmt % prefmt(x)) |
|
506 | 550 | |
|
507 | 551 | def compatdict(context, mapping, name, data, key='key', value='value', |
|
508 | 552 | fmt=None, plural=None, separator=' '): |
|
509 | 553 | """Wrap data like hybriddict(), but also supports old-style list template |
|
510 | 554 | |
|
511 | 555 | This exists for backward compatibility with the old-style template. Use |
|
512 | 556 | hybriddict() for new template keywords. |
|
513 | 557 | """ |
|
514 | 558 | c = [{key: k, value: v} for k, v in data.iteritems()] |
|
515 | 559 | f = _showcompatlist(context, mapping, name, c, plural, separator) |
|
516 | 560 | return hybriddict(data, key=key, value=value, fmt=fmt, gen=f) |
|
517 | 561 | |
|
518 | 562 | def compatlist(context, mapping, name, data, element=None, fmt=None, |
|
519 | 563 | plural=None, separator=' '): |
|
520 | 564 | """Wrap data like hybridlist(), but also supports old-style list template |
|
521 | 565 | |
|
522 | 566 | This exists for backward compatibility with the old-style template. Use |
|
523 | 567 | hybridlist() for new template keywords. |
|
524 | 568 | """ |
|
525 | 569 | f = _showcompatlist(context, mapping, name, data, plural, separator) |
|
526 | 570 | return hybridlist(data, name=element or name, fmt=fmt, gen=f) |
|
527 | 571 | |
|
528 | 572 | def _showcompatlist(context, mapping, name, values, plural=None, separator=' '): |
|
529 | 573 | """Return a generator that renders old-style list template |
|
530 | 574 | |
|
531 | 575 | name is name of key in template map. |
|
532 | 576 | values is list of strings or dicts. |
|
533 | 577 | plural is plural of name, if not simply name + 's'. |
|
534 | 578 | separator is used to join values as a string |
|
535 | 579 | |
|
536 | 580 | expansion works like this, given name 'foo'. |
|
537 | 581 | |
|
538 | 582 | if values is empty, expand 'no_foos'. |
|
539 | 583 | |
|
540 | 584 | if 'foo' not in template map, return values as a string, |
|
541 | 585 | joined by 'separator'. |
|
542 | 586 | |
|
543 | 587 | expand 'start_foos'. |
|
544 | 588 | |
|
545 | 589 | for each value, expand 'foo'. if 'last_foo' in template |
|
546 | 590 | map, expand it instead of 'foo' for last key. |
|
547 | 591 | |
|
548 | 592 | expand 'end_foos'. |
|
549 | 593 | """ |
|
550 | 594 | if not plural: |
|
551 | 595 | plural = name + 's' |
|
552 | 596 | if not values: |
|
553 | 597 | noname = 'no_' + plural |
|
554 | 598 | if context.preload(noname): |
|
555 | 599 | yield context.process(noname, mapping) |
|
556 | 600 | return |
|
557 | 601 | if not context.preload(name): |
|
558 | 602 | if isinstance(values[0], bytes): |
|
559 | 603 | yield separator.join(values) |
|
560 | 604 | else: |
|
561 | 605 | for v in values: |
|
562 | 606 | r = dict(v) |
|
563 | 607 | r.update(mapping) |
|
564 | 608 | yield r |
|
565 | 609 | return |
|
566 | 610 | startname = 'start_' + plural |
|
567 | 611 | if context.preload(startname): |
|
568 | 612 | yield context.process(startname, mapping) |
|
569 | 613 | def one(v, tag=name): |
|
570 | 614 | vmapping = {} |
|
571 | 615 | try: |
|
572 | 616 | vmapping.update(v) |
|
573 | 617 | # Python 2 raises ValueError if the type of v is wrong. Python |
|
574 | 618 | # 3 raises TypeError. |
|
575 | 619 | except (AttributeError, TypeError, ValueError): |
|
576 | 620 | try: |
|
577 | 621 | # Python 2 raises ValueError trying to destructure an e.g. |
|
578 | 622 | # bytes. Python 3 raises TypeError. |
|
579 | 623 | for a, b in v: |
|
580 | 624 | vmapping[a] = b |
|
581 | 625 | except (TypeError, ValueError): |
|
582 | 626 | vmapping[name] = v |
|
583 | 627 | vmapping = context.overlaymap(mapping, vmapping) |
|
584 | 628 | return context.process(tag, vmapping) |
|
585 | 629 | lastname = 'last_' + name |
|
586 | 630 | if context.preload(lastname): |
|
587 | 631 | last = values.pop() |
|
588 | 632 | else: |
|
589 | 633 | last = None |
|
590 | 634 | for v in values: |
|
591 | 635 | yield one(v) |
|
592 | 636 | if last is not None: |
|
593 | 637 | yield one(last, tag=lastname) |
|
594 | 638 | endname = 'end_' + plural |
|
595 | 639 | if context.preload(endname): |
|
596 | 640 | yield context.process(endname, mapping) |
|
597 | 641 | |
|
598 | 642 | def flatten(context, mapping, thing): |
|
599 | 643 | """Yield a single stream from a possibly nested set of iterators""" |
|
600 | 644 | if isinstance(thing, wrapped): |
|
601 | 645 | thing = thing.show(context, mapping) |
|
602 | 646 | if isinstance(thing, bytes): |
|
603 | 647 | yield thing |
|
604 | 648 | elif isinstance(thing, str): |
|
605 | 649 | # We can only hit this on Python 3, and it's here to guard |
|
606 | 650 | # against infinite recursion. |
|
607 | 651 | raise error.ProgrammingError('Mercurial IO including templates is done' |
|
608 | 652 | ' with bytes, not strings, got %r' % thing) |
|
609 | 653 | elif thing is None: |
|
610 | 654 | pass |
|
611 | 655 | elif not util.safehasattr(thing, '__iter__'): |
|
612 | 656 | yield pycompat.bytestr(thing) |
|
613 | 657 | else: |
|
614 | 658 | for i in thing: |
|
615 | 659 | if isinstance(i, wrapped): |
|
616 | 660 | i = i.show(context, mapping) |
|
617 | 661 | if isinstance(i, bytes): |
|
618 | 662 | yield i |
|
619 | 663 | elif i is None: |
|
620 | 664 | pass |
|
621 | 665 | elif not util.safehasattr(i, '__iter__'): |
|
622 | 666 | yield pycompat.bytestr(i) |
|
623 | 667 | else: |
|
624 | 668 | for j in flatten(context, mapping, i): |
|
625 | 669 | yield j |
|
626 | 670 | |
|
627 | 671 | def stringify(context, mapping, thing): |
|
628 | 672 | """Turn values into bytes by converting into text and concatenating them""" |
|
629 | 673 | if isinstance(thing, bytes): |
|
630 | 674 | return thing # retain localstr to be round-tripped |
|
631 | 675 | return b''.join(flatten(context, mapping, thing)) |
|
632 | 676 | |
|
633 | 677 | def findsymbolicname(arg): |
|
634 | 678 | """Find symbolic name for the given compiled expression; returns None |
|
635 | 679 | if nothing found reliably""" |
|
636 | 680 | while True: |
|
637 | 681 | func, data = arg |
|
638 | 682 | if func is runsymbol: |
|
639 | 683 | return data |
|
640 | 684 | elif func is runfilter: |
|
641 | 685 | arg = data[0] |
|
642 | 686 | else: |
|
643 | 687 | return None |
|
644 | 688 | |
|
645 | 689 | def _nonempty(xiter): |
|
646 | 690 | try: |
|
647 | 691 | next(xiter) |
|
648 | 692 | return True |
|
649 | 693 | except StopIteration: |
|
650 | 694 | return False |
|
651 | 695 | |
|
652 | 696 | def _unthunk(context, mapping, thing): |
|
653 | 697 | """Evaluate a lazy byte string into value""" |
|
654 | 698 | if not isinstance(thing, types.GeneratorType): |
|
655 | 699 | return thing |
|
656 | 700 | return stringify(context, mapping, thing) |
|
657 | 701 | |
|
658 | 702 | def evalrawexp(context, mapping, arg): |
|
659 | 703 | """Evaluate given argument as a bare template object which may require |
|
660 | 704 | further processing (such as folding generator of strings)""" |
|
661 | 705 | func, data = arg |
|
662 | 706 | return func(context, mapping, data) |
|
663 | 707 | |
|
664 | 708 | def evalwrapped(context, mapping, arg): |
|
665 | 709 | """Evaluate given argument to wrapped object""" |
|
666 | 710 | thing = evalrawexp(context, mapping, arg) |
|
667 | 711 | return makewrapped(context, mapping, thing) |
|
668 | 712 | |
|
669 | 713 | def makewrapped(context, mapping, thing): |
|
670 | 714 | """Lift object to a wrapped type""" |
|
671 | 715 | if isinstance(thing, wrapped): |
|
672 | 716 | return thing |
|
673 | 717 | thing = _unthunk(context, mapping, thing) |
|
674 | 718 | if isinstance(thing, bytes): |
|
675 | 719 | return wrappedbytes(thing) |
|
676 | 720 | return wrappedvalue(thing) |
|
677 | 721 | |
|
678 | 722 | def evalfuncarg(context, mapping, arg): |
|
679 | 723 | """Evaluate given argument as value type""" |
|
680 | 724 | return unwrapvalue(context, mapping, evalrawexp(context, mapping, arg)) |
|
681 | 725 | |
|
682 | 726 | def unwrapvalue(context, mapping, thing): |
|
683 | 727 | """Move the inner value object out of the wrapper""" |
|
684 | 728 | if isinstance(thing, wrapped): |
|
685 | 729 | return thing.tovalue(context, mapping) |
|
686 | 730 | # evalrawexp() may return string, generator of strings or arbitrary object |
|
687 | 731 | # such as date tuple, but filter does not want generator. |
|
688 | 732 | return _unthunk(context, mapping, thing) |
|
689 | 733 | |
|
690 | 734 | def evalboolean(context, mapping, arg): |
|
691 | 735 | """Evaluate given argument as boolean, but also takes boolean literals""" |
|
692 | 736 | func, data = arg |
|
693 | 737 | if func is runsymbol: |
|
694 | 738 | thing = func(context, mapping, data, default=None) |
|
695 | 739 | if thing is None: |
|
696 | 740 | # not a template keyword, takes as a boolean literal |
|
697 | 741 | thing = stringutil.parsebool(data) |
|
698 | 742 | else: |
|
699 | 743 | thing = func(context, mapping, data) |
|
700 | 744 | return makewrapped(context, mapping, thing).tobool(context, mapping) |
|
701 | 745 | |
|
702 | 746 | def evaldate(context, mapping, arg, err=None): |
|
703 | 747 | """Evaluate given argument as a date tuple or a date string; returns |
|
704 | 748 | a (unixtime, offset) tuple""" |
|
705 | 749 | thing = evalrawexp(context, mapping, arg) |
|
706 | 750 | return unwrapdate(context, mapping, thing, err) |
|
707 | 751 | |
|
708 | 752 | def unwrapdate(context, mapping, thing, err=None): |
|
709 | 753 | if isinstance(thing, date): |
|
710 | 754 | return thing.tovalue(context, mapping) |
|
711 | 755 | # TODO: update hgweb to not return bare tuple; then just stringify 'thing' |
|
712 | 756 | thing = unwrapvalue(context, mapping, thing) |
|
713 | 757 | try: |
|
714 | 758 | return dateutil.parsedate(thing) |
|
715 | 759 | except AttributeError: |
|
716 | 760 | raise error.ParseError(err or _('not a date tuple nor a string')) |
|
717 | 761 | except error.ParseError: |
|
718 | 762 | if not err: |
|
719 | 763 | raise |
|
720 | 764 | raise error.ParseError(err) |
|
721 | 765 | |
|
722 | 766 | def evalinteger(context, mapping, arg, err=None): |
|
723 | 767 | thing = evalrawexp(context, mapping, arg) |
|
724 | 768 | return unwrapinteger(context, mapping, thing, err) |
|
725 | 769 | |
|
726 | 770 | def unwrapinteger(context, mapping, thing, err=None): |
|
727 | 771 | thing = unwrapvalue(context, mapping, thing) |
|
728 | 772 | try: |
|
729 | 773 | return int(thing) |
|
730 | 774 | except (TypeError, ValueError): |
|
731 | 775 | raise error.ParseError(err or _('not an integer')) |
|
732 | 776 | |
|
733 | 777 | def evalstring(context, mapping, arg): |
|
734 | 778 | return stringify(context, mapping, evalrawexp(context, mapping, arg)) |
|
735 | 779 | |
|
736 | 780 | def evalstringliteral(context, mapping, arg): |
|
737 | 781 | """Evaluate given argument as string template, but returns symbol name |
|
738 | 782 | if it is unknown""" |
|
739 | 783 | func, data = arg |
|
740 | 784 | if func is runsymbol: |
|
741 | 785 | thing = func(context, mapping, data, default=data) |
|
742 | 786 | else: |
|
743 | 787 | thing = func(context, mapping, data) |
|
744 | 788 | return stringify(context, mapping, thing) |
|
745 | 789 | |
|
746 | 790 | _unwrapfuncbytype = { |
|
747 | 791 | None: unwrapvalue, |
|
748 | 792 | bytes: stringify, |
|
749 | 793 | date: unwrapdate, |
|
750 | 794 | int: unwrapinteger, |
|
751 | 795 | } |
|
752 | 796 | |
|
753 | 797 | def unwrapastype(context, mapping, thing, typ): |
|
754 | 798 | """Move the inner value object out of the wrapper and coerce its type""" |
|
755 | 799 | try: |
|
756 | 800 | f = _unwrapfuncbytype[typ] |
|
757 | 801 | except KeyError: |
|
758 | 802 | raise error.ProgrammingError('invalid type specified: %r' % typ) |
|
759 | 803 | return f(context, mapping, thing) |
|
760 | 804 | |
|
761 | 805 | def runinteger(context, mapping, data): |
|
762 | 806 | return int(data) |
|
763 | 807 | |
|
764 | 808 | def runstring(context, mapping, data): |
|
765 | 809 | return data |
|
766 | 810 | |
|
767 | 811 | def _recursivesymbolblocker(key): |
|
768 | 812 | def showrecursion(**args): |
|
769 | 813 | raise error.Abort(_("recursive reference '%s' in template") % key) |
|
770 | 814 | return showrecursion |
|
771 | 815 | |
|
772 | 816 | def runsymbol(context, mapping, key, default=''): |
|
773 | 817 | v = context.symbol(mapping, key) |
|
774 | 818 | if v is None: |
|
775 | 819 | # put poison to cut recursion. we can't move this to parsing phase |
|
776 | 820 | # because "x = {x}" is allowed if "x" is a keyword. (issue4758) |
|
777 | 821 | safemapping = mapping.copy() |
|
778 | 822 | safemapping[key] = _recursivesymbolblocker(key) |
|
779 | 823 | try: |
|
780 | 824 | v = context.process(key, safemapping) |
|
781 | 825 | except TemplateNotFound: |
|
782 | 826 | v = default |
|
783 | 827 | if callable(v) and getattr(v, '_requires', None) is None: |
|
784 | 828 | # old templatekw: expand all keywords and resources |
|
785 | 829 | # (TODO: deprecate this after porting web template keywords to new API) |
|
786 | 830 | props = {k: context._resources.lookup(context, mapping, k) |
|
787 | 831 | for k in context._resources.knownkeys()} |
|
788 | 832 | # pass context to _showcompatlist() through templatekw._showlist() |
|
789 | 833 | props['templ'] = context |
|
790 | 834 | props.update(mapping) |
|
791 | 835 | return v(**pycompat.strkwargs(props)) |
|
792 | 836 | if callable(v): |
|
793 | 837 | # new templatekw |
|
794 | 838 | try: |
|
795 | 839 | return v(context, mapping) |
|
796 | 840 | except ResourceUnavailable: |
|
797 | 841 | # unsupported keyword is mapped to empty just like unknown keyword |
|
798 | 842 | return None |
|
799 | 843 | return v |
|
800 | 844 | |
|
801 | 845 | def runtemplate(context, mapping, template): |
|
802 | 846 | for arg in template: |
|
803 | 847 | yield evalrawexp(context, mapping, arg) |
|
804 | 848 | |
|
805 | 849 | def runfilter(context, mapping, data): |
|
806 | 850 | arg, filt = data |
|
807 | 851 | thing = evalrawexp(context, mapping, arg) |
|
808 | 852 | intype = getattr(filt, '_intype', None) |
|
809 | 853 | try: |
|
810 | 854 | thing = unwrapastype(context, mapping, thing, intype) |
|
811 | 855 | return filt(thing) |
|
812 | 856 | except error.ParseError as e: |
|
813 | 857 | raise error.ParseError(bytes(e), hint=_formatfiltererror(arg, filt)) |
|
814 | 858 | |
|
815 | 859 | def _formatfiltererror(arg, filt): |
|
816 | 860 | fn = pycompat.sysbytes(filt.__name__) |
|
817 | 861 | sym = findsymbolicname(arg) |
|
818 | 862 | if not sym: |
|
819 | 863 | return _("incompatible use of template filter '%s'") % fn |
|
820 | 864 | return (_("template filter '%s' is not compatible with keyword '%s'") |
|
821 | 865 | % (fn, sym)) |
|
822 | 866 | |
|
823 | 867 | def _iteroverlaymaps(context, origmapping, newmappings): |
|
824 | 868 | """Generate combined mappings from the original mapping and an iterable |
|
825 | 869 | of partial mappings to override the original""" |
|
826 | 870 | for i, nm in enumerate(newmappings): |
|
827 | 871 | lm = context.overlaymap(origmapping, nm) |
|
828 | 872 | lm['index'] = i |
|
829 | 873 | yield lm |
|
830 | 874 | |
|
831 | 875 | def _applymap(context, mapping, d, darg, targ): |
|
832 | 876 | try: |
|
833 | 877 | diter = d.itermaps(context) |
|
834 | 878 | except error.ParseError as err: |
|
835 | 879 | sym = findsymbolicname(darg) |
|
836 | 880 | if not sym: |
|
837 | 881 | raise |
|
838 | 882 | hint = _("keyword '%s' does not support map operation") % sym |
|
839 | 883 | raise error.ParseError(bytes(err), hint=hint) |
|
840 | 884 | for lm in _iteroverlaymaps(context, mapping, diter): |
|
841 | 885 | yield evalrawexp(context, lm, targ) |
|
842 | 886 | |
|
843 | 887 | def runmap(context, mapping, data): |
|
844 | 888 | darg, targ = data |
|
845 | 889 | d = evalwrapped(context, mapping, darg) |
|
846 | 890 | return mappedgenerator(_applymap, args=(mapping, d, darg, targ)) |
|
847 | 891 | |
|
848 | 892 | def runmember(context, mapping, data): |
|
849 | 893 | darg, memb = data |
|
850 | 894 | d = evalwrapped(context, mapping, darg) |
|
851 | 895 | if isinstance(d, mappable): |
|
852 | 896 | lm = context.overlaymap(mapping, d.tomap(context)) |
|
853 | 897 | return runsymbol(context, lm, memb) |
|
854 | 898 | try: |
|
855 | 899 | return d.getmember(context, mapping, memb) |
|
856 | 900 | except error.ParseError as err: |
|
857 | 901 | sym = findsymbolicname(darg) |
|
858 | 902 | if not sym: |
|
859 | 903 | raise |
|
860 | 904 | hint = _("keyword '%s' does not support member operation") % sym |
|
861 | 905 | raise error.ParseError(bytes(err), hint=hint) |
|
862 | 906 | |
|
863 | 907 | def runnegate(context, mapping, data): |
|
864 | 908 | data = evalinteger(context, mapping, data, |
|
865 | 909 | _('negation needs an integer argument')) |
|
866 | 910 | return -data |
|
867 | 911 | |
|
868 | 912 | def runarithmetic(context, mapping, data): |
|
869 | 913 | func, left, right = data |
|
870 | 914 | left = evalinteger(context, mapping, left, |
|
871 | 915 | _('arithmetic only defined on integers')) |
|
872 | 916 | right = evalinteger(context, mapping, right, |
|
873 | 917 | _('arithmetic only defined on integers')) |
|
874 | 918 | try: |
|
875 | 919 | return func(left, right) |
|
876 | 920 | except ZeroDivisionError: |
|
877 | 921 | raise error.Abort(_('division by zero is not defined')) |
|
878 | 922 | |
|
879 | 923 | def joinitems(itemiter, sep): |
|
880 | 924 | """Join items with the separator; Returns generator of bytes""" |
|
881 | 925 | first = True |
|
882 | 926 | for x in itemiter: |
|
883 | 927 | if first: |
|
884 | 928 | first = False |
|
885 | 929 | elif sep: |
|
886 | 930 | yield sep |
|
887 | 931 | yield x |
@@ -1,1378 +1,1420 b'' | |||
|
1 | 1 | Test template filters and functions |
|
2 | 2 | =================================== |
|
3 | 3 | |
|
4 | 4 | $ hg init a |
|
5 | 5 | $ cd a |
|
6 | 6 | $ echo a > a |
|
7 | 7 | $ hg add a |
|
8 | 8 | $ echo line 1 > b |
|
9 | 9 | $ echo line 2 >> b |
|
10 | 10 | $ hg commit -l b -d '1000000 0' -u 'User Name <user@hostname>' |
|
11 | 11 | |
|
12 | 12 | $ hg add b |
|
13 | 13 | $ echo other 1 > c |
|
14 | 14 | $ echo other 2 >> c |
|
15 | 15 | $ echo >> c |
|
16 | 16 | $ echo other 3 >> c |
|
17 | 17 | $ hg commit -l c -d '1100000 0' -u 'A. N. Other <other@place>' |
|
18 | 18 | |
|
19 | 19 | $ hg add c |
|
20 | 20 | $ hg commit -m 'no person' -d '1200000 0' -u 'other@place' |
|
21 | 21 | $ echo c >> c |
|
22 | 22 | $ hg commit -m 'no user, no domain' -d '1300000 0' -u 'person' |
|
23 | 23 | |
|
24 | 24 | $ echo foo > .hg/branch |
|
25 | 25 | $ hg commit -m 'new branch' -d '1400000 0' -u 'person' |
|
26 | 26 | |
|
27 | 27 | $ hg co -q 3 |
|
28 | 28 | $ echo other 4 >> d |
|
29 | 29 | $ hg add d |
|
30 | 30 | $ hg commit -m 'new head' -d '1500000 0' -u 'person' |
|
31 | 31 | |
|
32 | 32 | $ hg merge -q foo |
|
33 | 33 | $ hg commit -m 'merge' -d '1500001 0' -u 'person' |
|
34 | 34 | |
|
35 | 35 | Second branch starting at nullrev: |
|
36 | 36 | |
|
37 | 37 | $ hg update null |
|
38 | 38 | 0 files updated, 0 files merged, 4 files removed, 0 files unresolved |
|
39 | 39 | $ echo second > second |
|
40 | 40 | $ hg add second |
|
41 | 41 | $ hg commit -m second -d '1000000 0' -u 'User Name <user@hostname>' |
|
42 | 42 | created new head |
|
43 | 43 | |
|
44 | 44 | $ echo third > third |
|
45 | 45 | $ hg add third |
|
46 | 46 | $ hg mv second fourth |
|
47 | 47 | $ hg commit -m third -d "2020-01-01 10:01" |
|
48 | 48 | |
|
49 | 49 | $ hg phase -r 5 --public |
|
50 | 50 | $ hg phase -r 7 --secret --force |
|
51 | 51 | |
|
52 | 52 | Filters work: |
|
53 | 53 | |
|
54 | 54 | $ hg log --template '{author|domain}\n' |
|
55 | 55 | |
|
56 | 56 | hostname |
|
57 | 57 | |
|
58 | 58 | |
|
59 | 59 | |
|
60 | 60 | |
|
61 | 61 | place |
|
62 | 62 | place |
|
63 | 63 | hostname |
|
64 | 64 | |
|
65 | 65 | $ hg log --template '{author|person}\n' |
|
66 | 66 | test |
|
67 | 67 | User Name |
|
68 | 68 | person |
|
69 | 69 | person |
|
70 | 70 | person |
|
71 | 71 | person |
|
72 | 72 | other |
|
73 | 73 | A. N. Other |
|
74 | 74 | User Name |
|
75 | 75 | |
|
76 | 76 | $ hg log --template '{author|user}\n' |
|
77 | 77 | test |
|
78 | 78 | user |
|
79 | 79 | person |
|
80 | 80 | person |
|
81 | 81 | person |
|
82 | 82 | person |
|
83 | 83 | other |
|
84 | 84 | other |
|
85 | 85 | user |
|
86 | 86 | |
|
87 | 87 | $ hg log --template '{date|date}\n' |
|
88 | 88 | Wed Jan 01 10:01:00 2020 +0000 |
|
89 | 89 | Mon Jan 12 13:46:40 1970 +0000 |
|
90 | 90 | Sun Jan 18 08:40:01 1970 +0000 |
|
91 | 91 | Sun Jan 18 08:40:00 1970 +0000 |
|
92 | 92 | Sat Jan 17 04:53:20 1970 +0000 |
|
93 | 93 | Fri Jan 16 01:06:40 1970 +0000 |
|
94 | 94 | Wed Jan 14 21:20:00 1970 +0000 |
|
95 | 95 | Tue Jan 13 17:33:20 1970 +0000 |
|
96 | 96 | Mon Jan 12 13:46:40 1970 +0000 |
|
97 | 97 | |
|
98 | 98 | $ hg log --template '{date|isodate}\n' |
|
99 | 99 | 2020-01-01 10:01 +0000 |
|
100 | 100 | 1970-01-12 13:46 +0000 |
|
101 | 101 | 1970-01-18 08:40 +0000 |
|
102 | 102 | 1970-01-18 08:40 +0000 |
|
103 | 103 | 1970-01-17 04:53 +0000 |
|
104 | 104 | 1970-01-16 01:06 +0000 |
|
105 | 105 | 1970-01-14 21:20 +0000 |
|
106 | 106 | 1970-01-13 17:33 +0000 |
|
107 | 107 | 1970-01-12 13:46 +0000 |
|
108 | 108 | |
|
109 | 109 | $ hg log --template '{date|isodatesec}\n' |
|
110 | 110 | 2020-01-01 10:01:00 +0000 |
|
111 | 111 | 1970-01-12 13:46:40 +0000 |
|
112 | 112 | 1970-01-18 08:40:01 +0000 |
|
113 | 113 | 1970-01-18 08:40:00 +0000 |
|
114 | 114 | 1970-01-17 04:53:20 +0000 |
|
115 | 115 | 1970-01-16 01:06:40 +0000 |
|
116 | 116 | 1970-01-14 21:20:00 +0000 |
|
117 | 117 | 1970-01-13 17:33:20 +0000 |
|
118 | 118 | 1970-01-12 13:46:40 +0000 |
|
119 | 119 | |
|
120 | 120 | $ hg log --template '{date|rfc822date}\n' |
|
121 | 121 | Wed, 01 Jan 2020 10:01:00 +0000 |
|
122 | 122 | Mon, 12 Jan 1970 13:46:40 +0000 |
|
123 | 123 | Sun, 18 Jan 1970 08:40:01 +0000 |
|
124 | 124 | Sun, 18 Jan 1970 08:40:00 +0000 |
|
125 | 125 | Sat, 17 Jan 1970 04:53:20 +0000 |
|
126 | 126 | Fri, 16 Jan 1970 01:06:40 +0000 |
|
127 | 127 | Wed, 14 Jan 1970 21:20:00 +0000 |
|
128 | 128 | Tue, 13 Jan 1970 17:33:20 +0000 |
|
129 | 129 | Mon, 12 Jan 1970 13:46:40 +0000 |
|
130 | 130 | |
|
131 | 131 | $ hg log --template '{desc|firstline}\n' |
|
132 | 132 | third |
|
133 | 133 | second |
|
134 | 134 | merge |
|
135 | 135 | new head |
|
136 | 136 | new branch |
|
137 | 137 | no user, no domain |
|
138 | 138 | no person |
|
139 | 139 | other 1 |
|
140 | 140 | line 1 |
|
141 | 141 | |
|
142 | 142 | $ hg log --template '{node|short}\n' |
|
143 | 143 | 95c24699272e |
|
144 | 144 | 29114dbae42b |
|
145 | 145 | d41e714fe50d |
|
146 | 146 | 13207e5a10d9 |
|
147 | 147 | bbe44766e73d |
|
148 | 148 | 10e46f2dcbf4 |
|
149 | 149 | 97054abb4ab8 |
|
150 | 150 | b608e9d1a3f0 |
|
151 | 151 | 1e4e1b8f71e0 |
|
152 | 152 | |
|
153 | 153 | $ hg log --template '<changeset author="{author|xmlescape}"/>\n' |
|
154 | 154 | <changeset author="test"/> |
|
155 | 155 | <changeset author="User Name <user@hostname>"/> |
|
156 | 156 | <changeset author="person"/> |
|
157 | 157 | <changeset author="person"/> |
|
158 | 158 | <changeset author="person"/> |
|
159 | 159 | <changeset author="person"/> |
|
160 | 160 | <changeset author="other@place"/> |
|
161 | 161 | <changeset author="A. N. Other <other@place>"/> |
|
162 | 162 | <changeset author="User Name <user@hostname>"/> |
|
163 | 163 | |
|
164 | 164 | $ hg log --template '{rev}: {children}\n' |
|
165 | 165 | 8: |
|
166 | 166 | 7: 8:95c24699272e |
|
167 | 167 | 6: |
|
168 | 168 | 5: 6:d41e714fe50d |
|
169 | 169 | 4: 6:d41e714fe50d |
|
170 | 170 | 3: 4:bbe44766e73d 5:13207e5a10d9 |
|
171 | 171 | 2: 3:10e46f2dcbf4 |
|
172 | 172 | 1: 2:97054abb4ab8 |
|
173 | 173 | 0: 1:b608e9d1a3f0 |
|
174 | 174 | |
|
175 | 175 | Formatnode filter works: |
|
176 | 176 | |
|
177 | 177 | $ hg -q log -r 0 --template '{node|formatnode}\n' |
|
178 | 178 | 1e4e1b8f71e0 |
|
179 | 179 | |
|
180 | 180 | $ hg log -r 0 --template '{node|formatnode}\n' |
|
181 | 181 | 1e4e1b8f71e0 |
|
182 | 182 | |
|
183 | 183 | $ hg -v log -r 0 --template '{node|formatnode}\n' |
|
184 | 184 | 1e4e1b8f71e0 |
|
185 | 185 | |
|
186 | 186 | $ hg --debug log -r 0 --template '{node|formatnode}\n' |
|
187 | 187 | 1e4e1b8f71e05681d422154f5421e385fec3454f |
|
188 | 188 | |
|
189 | 189 | Age filter: |
|
190 | 190 | |
|
191 | 191 | $ hg init unstable-hash |
|
192 | 192 | $ cd unstable-hash |
|
193 | 193 | $ hg log --template '{date|age}\n' > /dev/null || exit 1 |
|
194 | 194 | |
|
195 | 195 | >>> from __future__ import absolute_import |
|
196 | 196 | >>> import datetime |
|
197 | 197 | >>> fp = open('a', 'wb') |
|
198 | 198 | >>> n = datetime.datetime.now() + datetime.timedelta(366 * 7) |
|
199 | 199 | >>> fp.write(b'%d-%d-%d 00:00' % (n.year, n.month, n.day)) and None |
|
200 | 200 | >>> fp.close() |
|
201 | 201 | $ hg add a |
|
202 | 202 | $ hg commit -m future -d "`cat a`" |
|
203 | 203 | |
|
204 | 204 | $ hg log -l1 --template '{date|age}\n' |
|
205 | 205 | 7 years from now |
|
206 | 206 | |
|
207 | 207 | $ cd .. |
|
208 | 208 | $ rm -rf unstable-hash |
|
209 | 209 | |
|
210 | 210 | Filename filters: |
|
211 | 211 | |
|
212 | 212 | $ hg debugtemplate '{"foo/bar"|basename}|{"foo/"|basename}|{"foo"|basename}|\n' |
|
213 | 213 | bar||foo| |
|
214 | 214 | $ hg debugtemplate '{"foo/bar"|dirname}|{"foo/"|dirname}|{"foo"|dirname}|\n' |
|
215 | 215 | foo|foo|| |
|
216 | 216 | $ hg debugtemplate '{"foo/bar"|stripdir}|{"foo/"|stripdir}|{"foo"|stripdir}|\n' |
|
217 | 217 | foo|foo|foo| |
|
218 | 218 | |
|
219 | 219 | commondir() filter: |
|
220 | 220 | |
|
221 | 221 | $ hg debugtemplate '{""|splitlines|commondir}\n' |
|
222 | 222 | |
|
223 | 223 | $ hg debugtemplate '{"foo/bar\nfoo/baz\nfoo/foobar\n"|splitlines|commondir}\n' |
|
224 | 224 | foo |
|
225 | 225 | $ hg debugtemplate '{"foo/bar\nfoo/bar\n"|splitlines|commondir}\n' |
|
226 | 226 | foo |
|
227 | 227 | $ hg debugtemplate '{"/foo/bar\n/foo/bar\n"|splitlines|commondir}\n' |
|
228 | 228 | foo |
|
229 | 229 | $ hg debugtemplate '{"/foo\n/foo\n"|splitlines|commondir}\n' |
|
230 | 230 | |
|
231 | 231 | $ hg debugtemplate '{"foo/bar\nbar/baz"|splitlines|commondir}\n' |
|
232 | 232 | |
|
233 | 233 | $ hg debugtemplate '{"foo/bar\nbar/baz\nbar/foo\n"|splitlines|commondir}\n' |
|
234 | 234 | |
|
235 | 235 | $ hg debugtemplate '{"foo/../bar\nfoo/bar"|splitlines|commondir}\n' |
|
236 | 236 | foo |
|
237 | 237 | $ hg debugtemplate '{"foo\n/foo"|splitlines|commondir}\n' |
|
238 | 238 | |
|
239 | 239 | |
|
240 | 240 | $ hg log -r null -T '{rev|commondir}' |
|
241 | 241 | hg: parse error: argument is not a list of text |
|
242 | 242 | (template filter 'commondir' is not compatible with keyword 'rev') |
|
243 | 243 | [255] |
|
244 | 244 | |
|
245 | 245 | Add a dummy commit to make up for the instability of the above: |
|
246 | 246 | |
|
247 | 247 | $ echo a > a |
|
248 | 248 | $ hg add a |
|
249 | 249 | $ hg ci -m future |
|
250 | 250 | |
|
251 | 251 | Count filter: |
|
252 | 252 | |
|
253 | 253 | $ hg log -l1 --template '{node|count} {node|short|count}\n' |
|
254 | 254 | 40 12 |
|
255 | 255 | |
|
256 | 256 | $ hg log -l1 --template '{revset("null^")|count} {revset(".")|count} {revset("0::3")|count}\n' |
|
257 | 257 | 0 1 4 |
|
258 | 258 | |
|
259 | 259 | $ hg log -G --template '{rev}: children: {children|count}, \ |
|
260 | 260 | > tags: {tags|count}, file_adds: {file_adds|count}, \ |
|
261 | 261 | > ancestors: {revset("ancestors(%s)", rev)|count}' |
|
262 | 262 | @ 9: children: 0, tags: 1, file_adds: 1, ancestors: 3 |
|
263 | 263 | | |
|
264 | 264 | o 8: children: 1, tags: 0, file_adds: 2, ancestors: 2 |
|
265 | 265 | | |
|
266 | 266 | o 7: children: 1, tags: 0, file_adds: 1, ancestors: 1 |
|
267 | 267 | |
|
268 | 268 | o 6: children: 0, tags: 0, file_adds: 0, ancestors: 7 |
|
269 | 269 | |\ |
|
270 | 270 | | o 5: children: 1, tags: 0, file_adds: 1, ancestors: 5 |
|
271 | 271 | | | |
|
272 | 272 | o | 4: children: 1, tags: 0, file_adds: 0, ancestors: 5 |
|
273 | 273 | |/ |
|
274 | 274 | o 3: children: 2, tags: 0, file_adds: 0, ancestors: 4 |
|
275 | 275 | | |
|
276 | 276 | o 2: children: 1, tags: 0, file_adds: 1, ancestors: 3 |
|
277 | 277 | | |
|
278 | 278 | o 1: children: 1, tags: 0, file_adds: 1, ancestors: 2 |
|
279 | 279 | | |
|
280 | 280 | o 0: children: 1, tags: 0, file_adds: 1, ancestors: 1 |
|
281 | 281 | |
|
282 | 282 | |
|
283 | 283 | $ hg log -l1 -T '{termwidth|count}\n' |
|
284 | 284 | hg: parse error: not countable |
|
285 | 285 | (template filter 'count' is not compatible with keyword 'termwidth') |
|
286 | 286 | [255] |
|
287 | 287 | |
|
288 | 288 | Upper/lower filters: |
|
289 | 289 | |
|
290 | 290 | $ hg log -r0 --template '{branch|upper}\n' |
|
291 | 291 | DEFAULT |
|
292 | 292 | $ hg log -r0 --template '{author|lower}\n' |
|
293 | 293 | user name <user@hostname> |
|
294 | 294 | $ hg log -r0 --template '{date|upper}\n' |
|
295 | 295 | 1000000.00 |
|
296 | 296 | |
|
297 | 297 | Add a commit that does all possible modifications at once |
|
298 | 298 | |
|
299 | 299 | $ echo modify >> third |
|
300 | 300 | $ touch b |
|
301 | 301 | $ hg add b |
|
302 | 302 | $ hg mv fourth fifth |
|
303 | 303 | $ hg rm a |
|
304 | 304 | $ hg ci -m "Modify, add, remove, rename" |
|
305 | 305 | |
|
306 | 306 | Pass generator object created by template function to filter |
|
307 | 307 | |
|
308 | 308 | $ hg log -l 1 --template '{if(author, author)|user}\n' |
|
309 | 309 | test |
|
310 | 310 | |
|
311 | 311 | Test diff function: |
|
312 | 312 | |
|
313 | 313 | $ hg diff -c 8 |
|
314 | 314 | diff -r 29114dbae42b -r 95c24699272e fourth |
|
315 | 315 | --- /dev/null Thu Jan 01 00:00:00 1970 +0000 |
|
316 | 316 | +++ b/fourth Wed Jan 01 10:01:00 2020 +0000 |
|
317 | 317 | @@ -0,0 +1,1 @@ |
|
318 | 318 | +second |
|
319 | 319 | diff -r 29114dbae42b -r 95c24699272e second |
|
320 | 320 | --- a/second Mon Jan 12 13:46:40 1970 +0000 |
|
321 | 321 | +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 |
|
322 | 322 | @@ -1,1 +0,0 @@ |
|
323 | 323 | -second |
|
324 | 324 | diff -r 29114dbae42b -r 95c24699272e third |
|
325 | 325 | --- /dev/null Thu Jan 01 00:00:00 1970 +0000 |
|
326 | 326 | +++ b/third Wed Jan 01 10:01:00 2020 +0000 |
|
327 | 327 | @@ -0,0 +1,1 @@ |
|
328 | 328 | +third |
|
329 | 329 | |
|
330 | 330 | $ hg log -r 8 -T "{diff()}" |
|
331 | 331 | diff -r 29114dbae42b -r 95c24699272e fourth |
|
332 | 332 | --- /dev/null Thu Jan 01 00:00:00 1970 +0000 |
|
333 | 333 | +++ b/fourth Wed Jan 01 10:01:00 2020 +0000 |
|
334 | 334 | @@ -0,0 +1,1 @@ |
|
335 | 335 | +second |
|
336 | 336 | diff -r 29114dbae42b -r 95c24699272e second |
|
337 | 337 | --- a/second Mon Jan 12 13:46:40 1970 +0000 |
|
338 | 338 | +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 |
|
339 | 339 | @@ -1,1 +0,0 @@ |
|
340 | 340 | -second |
|
341 | 341 | diff -r 29114dbae42b -r 95c24699272e third |
|
342 | 342 | --- /dev/null Thu Jan 01 00:00:00 1970 +0000 |
|
343 | 343 | +++ b/third Wed Jan 01 10:01:00 2020 +0000 |
|
344 | 344 | @@ -0,0 +1,1 @@ |
|
345 | 345 | +third |
|
346 | 346 | |
|
347 | 347 | $ hg log -r 8 -T "{diff('glob:f*')}" |
|
348 | 348 | diff -r 29114dbae42b -r 95c24699272e fourth |
|
349 | 349 | --- /dev/null Thu Jan 01 00:00:00 1970 +0000 |
|
350 | 350 | +++ b/fourth Wed Jan 01 10:01:00 2020 +0000 |
|
351 | 351 | @@ -0,0 +1,1 @@ |
|
352 | 352 | +second |
|
353 | 353 | |
|
354 | 354 | $ hg log -r 8 -T "{diff('', 'glob:f*')}" |
|
355 | 355 | diff -r 29114dbae42b -r 95c24699272e second |
|
356 | 356 | --- a/second Mon Jan 12 13:46:40 1970 +0000 |
|
357 | 357 | +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 |
|
358 | 358 | @@ -1,1 +0,0 @@ |
|
359 | 359 | -second |
|
360 | 360 | diff -r 29114dbae42b -r 95c24699272e third |
|
361 | 361 | --- /dev/null Thu Jan 01 00:00:00 1970 +0000 |
|
362 | 362 | +++ b/third Wed Jan 01 10:01:00 2020 +0000 |
|
363 | 363 | @@ -0,0 +1,1 @@ |
|
364 | 364 | +third |
|
365 | 365 | |
|
366 | 366 | $ hg log -r 8 -T "{diff('FOURTH'|lower)}" |
|
367 | 367 | diff -r 29114dbae42b -r 95c24699272e fourth |
|
368 | 368 | --- /dev/null Thu Jan 01 00:00:00 1970 +0000 |
|
369 | 369 | +++ b/fourth Wed Jan 01 10:01:00 2020 +0000 |
|
370 | 370 | @@ -0,0 +1,1 @@ |
|
371 | 371 | +second |
|
372 | 372 | |
|
373 | 373 | $ cd .. |
|
374 | 374 | |
|
375 | 375 | latesttag() function: |
|
376 | 376 | |
|
377 | 377 | $ hg init latesttag |
|
378 | 378 | $ cd latesttag |
|
379 | 379 | |
|
380 | 380 | $ echo a > file |
|
381 | 381 | $ hg ci -Am a -d '0 0' |
|
382 | 382 | adding file |
|
383 | 383 | |
|
384 | 384 | $ echo b >> file |
|
385 | 385 | $ hg ci -m b -d '1 0' |
|
386 | 386 | |
|
387 | 387 | $ echo c >> head1 |
|
388 | 388 | $ hg ci -Am h1c -d '2 0' |
|
389 | 389 | adding head1 |
|
390 | 390 | |
|
391 | 391 | $ hg update -q 1 |
|
392 | 392 | $ echo d >> head2 |
|
393 | 393 | $ hg ci -Am h2d -d '3 0' |
|
394 | 394 | adding head2 |
|
395 | 395 | created new head |
|
396 | 396 | |
|
397 | 397 | $ echo e >> head2 |
|
398 | 398 | $ hg ci -m h2e -d '4 0' |
|
399 | 399 | |
|
400 | 400 | $ hg merge -q |
|
401 | 401 | $ hg ci -m merge -d '5 -3600' |
|
402 | 402 | |
|
403 | 403 | $ hg tag -r 1 -m t1 -d '6 0' t1 |
|
404 | 404 | $ hg tag -r 2 -m t2 -d '7 0' t2 |
|
405 | 405 | $ hg tag -r 3 -m t3 -d '8 0' t3 |
|
406 | 406 | $ hg tag -r 4 -m t4 -d '4 0' t4 # older than t2, but should not matter |
|
407 | 407 | $ hg tag -r 5 -m t5 -d '9 0' t5 |
|
408 | 408 | $ hg tag -r 3 -m at3 -d '10 0' at3 |
|
409 | 409 | |
|
410 | 410 | $ hg log -G --template "{rev}: {latesttag('re:^t[13]$') % '{tag}, C: {changes}, D: {distance}'}\n" |
|
411 | 411 | @ 11: t3, C: 9, D: 8 |
|
412 | 412 | | |
|
413 | 413 | o 10: t3, C: 8, D: 7 |
|
414 | 414 | | |
|
415 | 415 | o 9: t3, C: 7, D: 6 |
|
416 | 416 | | |
|
417 | 417 | o 8: t3, C: 6, D: 5 |
|
418 | 418 | | |
|
419 | 419 | o 7: t3, C: 5, D: 4 |
|
420 | 420 | | |
|
421 | 421 | o 6: t3, C: 4, D: 3 |
|
422 | 422 | | |
|
423 | 423 | o 5: t3, C: 3, D: 2 |
|
424 | 424 | |\ |
|
425 | 425 | | o 4: t3, C: 1, D: 1 |
|
426 | 426 | | | |
|
427 | 427 | | o 3: t3, C: 0, D: 0 |
|
428 | 428 | | | |
|
429 | 429 | o | 2: t1, C: 1, D: 1 |
|
430 | 430 | |/ |
|
431 | 431 | o 1: t1, C: 0, D: 0 |
|
432 | 432 | | |
|
433 | 433 | o 0: null, C: 1, D: 1 |
|
434 | 434 | |
|
435 | 435 | |
|
436 | 436 | $ cd .. |
|
437 | 437 | |
|
438 | Test filter() empty values: | |
|
439 | ||
|
440 | $ hg log -R a -r 1 -T '{filter(desc|splitlines) % "{line}\n"}' | |
|
441 | other 1 | |
|
442 | other 2 | |
|
443 | other 3 | |
|
444 | $ hg log -R a -r 0 -T '{filter(dict(a=0, b=1) % "{ifeq(key, "a", "{value}\n")}")}' | |
|
445 | 0 | |
|
446 | ||
|
447 | 0 should not be falsy | |
|
448 | ||
|
449 | $ hg log -R a -r 0 -T '{filter(revset("0:2"))}\n' | |
|
450 | 0 1 2 | |
|
451 | ||
|
452 | Test filter() shouldn't crash: | |
|
453 | ||
|
454 | $ hg log -R a -r 0 -T '{filter(extras)}\n' | |
|
455 | branch=default | |
|
456 | $ hg log -R a -r 0 -T '{filter(files)}\n' | |
|
457 | a | |
|
458 | ||
|
459 | Test filter() unsupported arguments: | |
|
460 | ||
|
461 | $ hg log -R a -r 0 -T '{filter()}\n' | |
|
462 | hg: parse error: filter expects one argument | |
|
463 | [255] | |
|
464 | $ hg log -R a -r 0 -T '{filter(date)}\n' | |
|
465 | hg: parse error: date is not iterable | |
|
466 | [255] | |
|
467 | $ hg log -R a -r 0 -T '{filter(rev)}\n' | |
|
468 | hg: parse error: 0 is not iterable | |
|
469 | [255] | |
|
470 | $ hg log -R a -r 0 -T '{filter(desc|firstline)}\n' | |
|
471 | hg: parse error: 'line 1' is not filterable | |
|
472 | [255] | |
|
473 | $ hg log -R a -r 0 -T '{filter(manifest)}\n' | |
|
474 | hg: parse error: '0:a0c8bcbbb45c' is not filterable | |
|
475 | [255] | |
|
476 | $ hg log -R a -r 0 -T '{filter(succsandmarkers)}\n' | |
|
477 | hg: parse error: not filterable without template | |
|
478 | [255] | |
|
479 | ||
|
438 | 480 |
|
|
439 | 481 | |
|
440 | 482 | $ hg log -R latesttag -r tip -T '{join(manifest, ".")}\n' |
|
441 | 483 | 1.1.:.2.b.c.6.e.9.0.0.6.c.e.2 |
|
442 | 484 | $ hg log -R latesttag -r tip -T '{join(get(extras, "branch"), ".")}\n' |
|
443 | 485 | d.e.f.a.u.l.t |
|
444 | 486 | |
|
445 | 487 | Test join() over string |
|
446 | 488 | |
|
447 | 489 | $ hg log -R latesttag -r tip -T '{join(rev|stringify, ".")}\n' |
|
448 | 490 | 1.1 |
|
449 | 491 | |
|
450 | 492 | Test join() over uniterable |
|
451 | 493 | |
|
452 | 494 | $ hg log -R latesttag -r tip -T '{join(rev, "")}\n' |
|
453 | 495 | hg: parse error: 11 is not iterable |
|
454 | 496 | [255] |
|
455 | 497 | |
|
456 | 498 | Test min/max of integers |
|
457 | 499 | |
|
458 | 500 | $ hg log -R latesttag -l1 -T '{min(revset("9:10"))}\n' |
|
459 | 501 | 9 |
|
460 | 502 | $ hg log -R latesttag -l1 -T '{max(revset("9:10"))}\n' |
|
461 | 503 | 10 |
|
462 | 504 | |
|
463 | 505 | Test min/max over map operation: |
|
464 | 506 | |
|
465 | 507 | $ hg log -R latesttag -r3 -T '{min(tags % "{tag}")}\n' |
|
466 | 508 | at3 |
|
467 | 509 | $ hg log -R latesttag -r3 -T '{max(tags % "{tag}")}\n' |
|
468 | 510 | t3 |
|
469 | 511 | |
|
470 | 512 | Test min/max of strings: |
|
471 | 513 | |
|
472 | 514 | $ hg log -R latesttag -l1 -T '{min(desc)}\n' |
|
473 | 515 | 3 |
|
474 | 516 | $ hg log -R latesttag -l1 -T '{max(desc)}\n' |
|
475 | 517 | t |
|
476 | 518 | |
|
477 | 519 | Test min/max of non-iterable: |
|
478 | 520 | |
|
479 | 521 | $ hg debugtemplate '{min(1)}' |
|
480 | 522 | hg: parse error: 1 is not iterable |
|
481 | 523 | (min first argument should be an iterable) |
|
482 | 524 | [255] |
|
483 | 525 | $ hg debugtemplate '{max(2)}' |
|
484 | 526 | hg: parse error: 2 is not iterable |
|
485 | 527 | (max first argument should be an iterable) |
|
486 | 528 | [255] |
|
487 | 529 | |
|
488 | 530 | $ hg log -R latesttag -l1 -T '{min(date)}' |
|
489 | 531 | hg: parse error: date is not iterable |
|
490 | 532 | (min first argument should be an iterable) |
|
491 | 533 | [255] |
|
492 | 534 | $ hg log -R latesttag -l1 -T '{max(date)}' |
|
493 | 535 | hg: parse error: date is not iterable |
|
494 | 536 | (max first argument should be an iterable) |
|
495 | 537 | [255] |
|
496 | 538 | |
|
497 | 539 | Test min/max of empty sequence: |
|
498 | 540 | |
|
499 | 541 | $ hg debugtemplate '{min("")}' |
|
500 | 542 | hg: parse error: empty string |
|
501 | 543 | (min first argument should be an iterable) |
|
502 | 544 | [255] |
|
503 | 545 | $ hg debugtemplate '{max("")}' |
|
504 | 546 | hg: parse error: empty string |
|
505 | 547 | (max first argument should be an iterable) |
|
506 | 548 | [255] |
|
507 | 549 | $ hg debugtemplate '{min(dict())}' |
|
508 | 550 | hg: parse error: empty sequence |
|
509 | 551 | (min first argument should be an iterable) |
|
510 | 552 | [255] |
|
511 | 553 | $ hg debugtemplate '{max(dict())}' |
|
512 | 554 | hg: parse error: empty sequence |
|
513 | 555 | (max first argument should be an iterable) |
|
514 | 556 | [255] |
|
515 | 557 | $ hg debugtemplate '{min(dict() % "")}' |
|
516 | 558 | hg: parse error: empty sequence |
|
517 | 559 | (min first argument should be an iterable) |
|
518 | 560 | [255] |
|
519 | 561 | $ hg debugtemplate '{max(dict() % "")}' |
|
520 | 562 | hg: parse error: empty sequence |
|
521 | 563 | (max first argument should be an iterable) |
|
522 | 564 | [255] |
|
523 | 565 | |
|
524 | 566 | Test min/max of if() result |
|
525 | 567 | |
|
526 | 568 | $ cd latesttag |
|
527 | 569 | $ hg log -l1 -T '{min(if(true, revset("9:10"), ""))}\n' |
|
528 | 570 | 9 |
|
529 | 571 | $ hg log -l1 -T '{max(if(false, "", revset("9:10")))}\n' |
|
530 | 572 | 10 |
|
531 | 573 | $ hg log -l1 -T '{min(ifcontains("a", "aa", revset("9:10"), ""))}\n' |
|
532 | 574 | 9 |
|
533 | 575 | $ hg log -l1 -T '{max(ifcontains("a", "bb", "", revset("9:10")))}\n' |
|
534 | 576 | 10 |
|
535 | 577 | $ hg log -l1 -T '{min(ifeq(0, 0, revset("9:10"), ""))}\n' |
|
536 | 578 | 9 |
|
537 | 579 | $ hg log -l1 -T '{max(ifeq(0, 1, "", revset("9:10")))}\n' |
|
538 | 580 | 10 |
|
539 | 581 | $ cd .. |
|
540 | 582 | |
|
541 | 583 | Test laziness of if() then/else clause |
|
542 | 584 | |
|
543 | 585 | $ hg debugtemplate '{count(0)}' |
|
544 | 586 | hg: parse error: not countable |
|
545 | 587 | (incompatible use of template filter 'count') |
|
546 | 588 | [255] |
|
547 | 589 | $ hg debugtemplate '{if(true, "", count(0))}' |
|
548 | 590 | $ hg debugtemplate '{if(false, count(0), "")}' |
|
549 | 591 | $ hg debugtemplate '{ifcontains("a", "aa", "", count(0))}' |
|
550 | 592 | $ hg debugtemplate '{ifcontains("a", "bb", count(0), "")}' |
|
551 | 593 | $ hg debugtemplate '{ifeq(0, 0, "", count(0))}' |
|
552 | 594 | $ hg debugtemplate '{ifeq(0, 1, count(0), "")}' |
|
553 | 595 | |
|
554 | 596 | Test the sub function of templating for expansion: |
|
555 | 597 | |
|
556 | 598 | $ hg log -R latesttag -r 10 --template '{sub("[0-9]", "x", "{rev}")}\n' |
|
557 | 599 | xx |
|
558 | 600 | |
|
559 | 601 | $ hg log -R latesttag -r 10 -T '{sub("[", "x", rev)}\n' |
|
560 | 602 | hg: parse error: sub got an invalid pattern: [ |
|
561 | 603 | [255] |
|
562 | 604 | $ hg log -R latesttag -r 10 -T '{sub("[0-9]", r"\1", rev)}\n' |
|
563 | 605 |
|
|
564 | 606 | [255] |
|
565 | 607 | |
|
566 | 608 | Test the strip function with chars specified: |
|
567 | 609 | |
|
568 | 610 | $ hg log -R latesttag --template '{desc}\n' |
|
569 | 611 | at3 |
|
570 | 612 | t5 |
|
571 | 613 | t4 |
|
572 | 614 | t3 |
|
573 | 615 | t2 |
|
574 | 616 | t1 |
|
575 | 617 | merge |
|
576 | 618 | h2e |
|
577 | 619 | h2d |
|
578 | 620 | h1c |
|
579 | 621 | b |
|
580 | 622 | a |
|
581 | 623 | |
|
582 | 624 | $ hg log -R latesttag --template '{strip(desc, "te")}\n' |
|
583 | 625 | at3 |
|
584 | 626 | 5 |
|
585 | 627 | 4 |
|
586 | 628 | 3 |
|
587 | 629 | 2 |
|
588 | 630 | 1 |
|
589 | 631 | merg |
|
590 | 632 | h2 |
|
591 | 633 | h2d |
|
592 | 634 | h1c |
|
593 | 635 | b |
|
594 | 636 | a |
|
595 | 637 | |
|
596 | 638 | Test date format: |
|
597 | 639 | |
|
598 | 640 | $ hg log -R latesttag --template 'date: {date(date, "%y %m %d %S %z")}\n' |
|
599 | 641 | date: 70 01 01 10 +0000 |
|
600 | 642 | date: 70 01 01 09 +0000 |
|
601 | 643 | date: 70 01 01 04 +0000 |
|
602 | 644 | date: 70 01 01 08 +0000 |
|
603 | 645 | date: 70 01 01 07 +0000 |
|
604 | 646 | date: 70 01 01 06 +0000 |
|
605 | 647 | date: 70 01 01 05 +0100 |
|
606 | 648 | date: 70 01 01 04 +0000 |
|
607 | 649 | date: 70 01 01 03 +0000 |
|
608 | 650 | date: 70 01 01 02 +0000 |
|
609 | 651 | date: 70 01 01 01 +0000 |
|
610 | 652 | date: 70 01 01 00 +0000 |
|
611 | 653 | |
|
612 | 654 | Test invalid date: |
|
613 | 655 | |
|
614 | 656 | $ hg log -R latesttag -T '{date(rev)}\n' |
|
615 | 657 | hg: parse error: date expects a date information |
|
616 | 658 | [255] |
|
617 | 659 | |
|
618 | 660 | Set up repository containing template fragments in commit metadata: |
|
619 | 661 | |
|
620 | 662 | $ hg init r |
|
621 | 663 | $ cd r |
|
622 | 664 | $ echo a > a |
|
623 | 665 | $ hg ci -Am '{rev}' |
|
624 | 666 | adding a |
|
625 | 667 | |
|
626 | 668 | $ hg branch -q 'text.{rev}' |
|
627 | 669 | $ echo aa >> aa |
|
628 | 670 | $ hg ci -u '{node|short}' -m 'desc to be wrapped desc to be wrapped' |
|
629 | 671 | |
|
630 | 672 | color effect can be specified without quoting: |
|
631 | 673 | |
|
632 | 674 | $ hg log --color=always -l 1 --template '{label(red, "text\n")}' |
|
633 | 675 | \x1b[0;31mtext\x1b[0m (esc) |
|
634 | 676 | |
|
635 | 677 | color effects can be nested (issue5413) |
|
636 | 678 | |
|
637 | 679 | $ hg debugtemplate --color=always \ |
|
638 | 680 | > '{label(red, "red{label(magenta, "ma{label(cyan, "cyan")}{label(yellow, "yellow")}genta")}")}\n' |
|
639 | 681 | \x1b[0;31mred\x1b[0;35mma\x1b[0;36mcyan\x1b[0m\x1b[0;31m\x1b[0;35m\x1b[0;33myellow\x1b[0m\x1b[0;31m\x1b[0;35mgenta\x1b[0m (esc) |
|
640 | 682 | |
|
641 | 683 | pad() should interact well with color codes (issue5416) |
|
642 | 684 | |
|
643 | 685 | $ hg debugtemplate --color=always \ |
|
644 | 686 | > '{pad(label(red, "red"), 5, label(cyan, "-"))}\n' |
|
645 | 687 | \x1b[0;31mred\x1b[0m\x1b[0;36m-\x1b[0m\x1b[0;36m-\x1b[0m (esc) |
|
646 | 688 | |
|
647 | 689 | label should be no-op if color is disabled: |
|
648 | 690 | |
|
649 | 691 | $ hg log --color=never -l 1 --template '{label(red, "text\n")}' |
|
650 | 692 | text |
|
651 | 693 | $ hg log --config extensions.color=! -l 1 --template '{label(red, "text\n")}' |
|
652 | 694 | text |
|
653 | 695 | |
|
654 | 696 | Test branches inside if statement: |
|
655 | 697 | |
|
656 | 698 | $ hg log -r 0 --template '{if(branches, "yes", "no")}\n' |
|
657 | 699 | no |
|
658 | 700 | |
|
659 | 701 | Test dict constructor: |
|
660 | 702 | |
|
661 | 703 | $ hg log -r 0 -T '{dict(y=node|short, x=rev)}\n' |
|
662 | 704 | y=f7769ec2ab97 x=0 |
|
663 | 705 | $ hg log -r 0 -T '{dict(x=rev, y=node|short) % "{key}={value}\n"}' |
|
664 | 706 | x=0 |
|
665 | 707 | y=f7769ec2ab97 |
|
666 | 708 | $ hg log -r 0 -T '{dict(x=rev, y=node|short)|json}\n' |
|
667 | 709 | {"x": 0, "y": "f7769ec2ab97"} |
|
668 | 710 | $ hg log -r 0 -T '{dict()|json}\n' |
|
669 | 711 | {} |
|
670 | 712 | |
|
671 | 713 | $ hg log -r 0 -T '{dict(rev, node=node|short)}\n' |
|
672 | 714 | rev=0 node=f7769ec2ab97 |
|
673 | 715 | $ hg log -r 0 -T '{dict(rev, node|short)}\n' |
|
674 | 716 | rev=0 node=f7769ec2ab97 |
|
675 | 717 | |
|
676 | 718 | $ hg log -r 0 -T '{dict(rev, rev=rev)}\n' |
|
677 | 719 | hg: parse error: duplicated dict key 'rev' inferred |
|
678 | 720 | [255] |
|
679 | 721 | $ hg log -r 0 -T '{dict(node, node|short)}\n' |
|
680 | 722 | hg: parse error: duplicated dict key 'node' inferred |
|
681 | 723 | [255] |
|
682 | 724 | $ hg log -r 0 -T '{dict(1 + 2)}' |
|
683 | 725 | hg: parse error: dict key cannot be inferred |
|
684 | 726 | [255] |
|
685 | 727 | |
|
686 | 728 | $ hg log -r 0 -T '{dict(x=rev, x=node)}' |
|
687 | 729 | hg: parse error: dict got multiple values for keyword argument 'x' |
|
688 | 730 | [255] |
|
689 | 731 | |
|
690 | 732 | Test get function: |
|
691 | 733 | |
|
692 | 734 | $ hg log -r 0 --template '{get(extras, "branch")}\n' |
|
693 | 735 | default |
|
694 | 736 | $ hg log -r 0 --template '{get(extras, "br{"anch"}")}\n' |
|
695 | 737 | default |
|
696 | 738 | $ hg log -r 0 --template '{get(files, "should_fail")}\n' |
|
697 | 739 | hg: parse error: not a dictionary |
|
698 | 740 | (get() expects a dict as first argument) |
|
699 | 741 | [255] |
|
700 | 742 | |
|
701 | 743 | Test json filter applied to wrapped object: |
|
702 | 744 | |
|
703 | 745 | $ hg log -r0 -T '{files|json}\n' |
|
704 | 746 | ["a"] |
|
705 | 747 | $ hg log -r0 -T '{extras|json}\n' |
|
706 | 748 | {"branch": "default"} |
|
707 | 749 | $ hg log -r0 -T '{date|json}\n' |
|
708 | 750 | [0, 0] |
|
709 | 751 | |
|
710 | 752 | Test json filter applied to map result: |
|
711 | 753 | |
|
712 | 754 | $ hg log -r0 -T '{json(extras % "{key}")}\n' |
|
713 | 755 | ["branch"] |
|
714 | 756 | |
|
715 | 757 | Test localdate(date, tz) function: |
|
716 | 758 | |
|
717 | 759 | $ TZ=JST-09 hg log -r0 -T '{date|localdate|isodate}\n' |
|
718 | 760 | 1970-01-01 09:00 +0900 |
|
719 | 761 | $ TZ=JST-09 hg log -r0 -T '{localdate(date, "UTC")|isodate}\n' |
|
720 | 762 | 1970-01-01 00:00 +0000 |
|
721 | 763 | $ TZ=JST-09 hg log -r0 -T '{localdate(date, "blahUTC")|isodate}\n' |
|
722 | 764 | hg: parse error: localdate expects a timezone |
|
723 | 765 | [255] |
|
724 | 766 | $ TZ=JST-09 hg log -r0 -T '{localdate(date, "+0200")|isodate}\n' |
|
725 | 767 | 1970-01-01 02:00 +0200 |
|
726 | 768 | $ TZ=JST-09 hg log -r0 -T '{localdate(date, "0")|isodate}\n' |
|
727 | 769 | 1970-01-01 00:00 +0000 |
|
728 | 770 | $ TZ=JST-09 hg log -r0 -T '{localdate(date, 0)|isodate}\n' |
|
729 | 771 | 1970-01-01 00:00 +0000 |
|
730 | 772 | $ hg log -r0 -T '{localdate(date, "invalid")|isodate}\n' |
|
731 | 773 | hg: parse error: localdate expects a timezone |
|
732 | 774 | [255] |
|
733 | 775 | $ hg log -r0 -T '{localdate(date, date)|isodate}\n' |
|
734 | 776 | hg: parse error: localdate expects a timezone |
|
735 | 777 | [255] |
|
736 | 778 | |
|
737 | 779 | Test shortest(node) function: |
|
738 | 780 | |
|
739 | 781 | $ echo b > b |
|
740 | 782 | $ hg ci -qAm b |
|
741 | 783 | $ hg log --template '{shortest(node)}\n' |
|
742 | 784 | e777 |
|
743 | 785 | bcc7 |
|
744 | 786 | f776 |
|
745 | 787 | $ hg log --template '{shortest(node, 10)}\n' |
|
746 | 788 | e777603221 |
|
747 | 789 | bcc7ff960b |
|
748 | 790 | f7769ec2ab |
|
749 | 791 | $ hg log --template '{node|shortest}\n' -l1 |
|
750 | 792 | e777 |
|
751 | 793 | |
|
752 | 794 | $ hg log -r 0 -T '{shortest(node, "1{"0"}")}\n' |
|
753 | 795 | f7769ec2ab |
|
754 | 796 | $ hg log -r 0 -T '{shortest(node, "not an int")}\n' |
|
755 | 797 | hg: parse error: shortest() expects an integer minlength |
|
756 | 798 | [255] |
|
757 | 799 | |
|
758 | 800 | $ hg log -r 'wdir()' -T '{node|shortest}\n' |
|
759 | 801 | ffff |
|
760 | 802 | |
|
761 | 803 | $ hg log --template '{shortest("f")}\n' -l1 |
|
762 | 804 | f |
|
763 | 805 | |
|
764 | 806 | $ hg log --template '{shortest("0123456789012345678901234567890123456789")}\n' -l1 |
|
765 | 807 | 0123456789012345678901234567890123456789 |
|
766 | 808 | |
|
767 | 809 | $ hg log --template '{shortest("01234567890123456789012345678901234567890123456789")}\n' -l1 |
|
768 | 810 | 01234567890123456789012345678901234567890123456789 |
|
769 | 811 | |
|
770 | 812 | $ hg log --template '{shortest("not a hex string")}\n' -l1 |
|
771 | 813 | not a hex string |
|
772 | 814 | |
|
773 | 815 | $ hg log --template '{shortest("not a hex string, but it'\''s 40 bytes long")}\n' -l1 |
|
774 | 816 | not a hex string, but it's 40 bytes long |
|
775 | 817 | |
|
776 | 818 | $ hg log --template '{shortest("ffffffffffffffffffffffffffffffffffffffff")}\n' -l1 |
|
777 | 819 | ffff |
|
778 | 820 | |
|
779 | 821 | $ hg log --template '{shortest("fffffff")}\n' -l1 |
|
780 | 822 | ffff |
|
781 | 823 | |
|
782 | 824 | $ hg log --template '{shortest("ff")}\n' -l1 |
|
783 | 825 | ffff |
|
784 | 826 | |
|
785 | 827 | $ cd .. |
|
786 | 828 | |
|
787 | 829 | Test shortest(node) with the repo having short hash collision: |
|
788 | 830 | |
|
789 | 831 | $ hg init hashcollision |
|
790 | 832 | $ cd hashcollision |
|
791 | 833 | $ cat <<EOF >> .hg/hgrc |
|
792 | 834 | > [experimental] |
|
793 | 835 | > evolution.createmarkers=True |
|
794 | 836 | > EOF |
|
795 | 837 | $ echo 0 > a |
|
796 | 838 | $ hg ci -qAm 0 |
|
797 | 839 | $ for i in 17 129 248 242 480 580 617 1057 2857 4025; do |
|
798 | 840 | > hg up -q 0 |
|
799 | 841 | > echo $i > a |
|
800 | 842 | > hg ci -qm $i |
|
801 | 843 | > done |
|
802 | 844 | $ hg up -q null |
|
803 | 845 | $ hg log -r0: -T '{rev}:{node}\n' |
|
804 | 846 | 0:b4e73ffab476aa0ee32ed81ca51e07169844bc6a |
|
805 | 847 | 1:11424df6dc1dd4ea255eae2b58eaca7831973bbc |
|
806 | 848 | 2:11407b3f1b9c3e76a79c1ec5373924df096f0499 |
|
807 | 849 | 3:11dd92fe0f39dfdaacdaa5f3997edc533875cfc4 |
|
808 | 850 | 4:10776689e627b465361ad5c296a20a487e153ca4 |
|
809 | 851 | 5:a00be79088084cb3aff086ab799f8790e01a976b |
|
810 | 852 | 6:a0b0acd79b4498d0052993d35a6a748dd51d13e6 |
|
811 | 853 | 7:a0457b3450b8e1b778f1163b31a435802987fe5d |
|
812 | 854 | 8:c56256a09cd28e5764f32e8e2810d0f01e2e357a |
|
813 | 855 | 9:c5623987d205cd6d9d8389bfc40fff9dbb670b48 |
|
814 | 856 | 10:c562ddd9c94164376c20b86b0b4991636a3bf84f |
|
815 | 857 | $ hg debugobsolete a00be79088084cb3aff086ab799f8790e01a976b |
|
816 | 858 | obsoleted 1 changesets |
|
817 | 859 | $ hg debugobsolete c5623987d205cd6d9d8389bfc40fff9dbb670b48 |
|
818 | 860 | obsoleted 1 changesets |
|
819 | 861 | $ hg debugobsolete c562ddd9c94164376c20b86b0b4991636a3bf84f |
|
820 | 862 | obsoleted 1 changesets |
|
821 | 863 | |
|
822 | 864 | nodes starting with '11' (we don't have the revision number '11' though) |
|
823 | 865 | |
|
824 | 866 | $ hg log -r 1:3 -T '{rev}:{shortest(node, 0)}\n' |
|
825 | 867 | 1:1142 |
|
826 | 868 | 2:1140 |
|
827 | 869 | 3:11d |
|
828 | 870 | |
|
829 | 871 | '5:a00' is hidden, but still we have two nodes starting with 'a0' |
|
830 | 872 | |
|
831 | 873 | $ hg log -r 6:7 -T '{rev}:{shortest(node, 0)}\n' |
|
832 | 874 | 6:a0b |
|
833 | 875 | 7:a04 |
|
834 | 876 | |
|
835 | 877 | node '10' conflicts with the revision number '10' even if it is hidden |
|
836 | 878 | (we could exclude hidden revision numbers, but currently we don't) |
|
837 | 879 | |
|
838 | 880 | $ hg log -r 4 -T '{rev}:{shortest(node, 0)}\n' |
|
839 | 881 | 4:107 |
|
840 | 882 | $ hg log -r 4 -T '{rev}:{shortest(node, 0)}\n' --hidden |
|
841 | 883 | 4:107 |
|
842 | 884 | |
|
843 | 885 | node 'c562' should be unique if the other 'c562' nodes are hidden |
|
844 | 886 | (but we don't try the slow path to filter out hidden nodes for now) |
|
845 | 887 | |
|
846 | 888 | $ hg log -r 8 -T '{rev}:{node|shortest}\n' |
|
847 | 889 | 8:c5625 |
|
848 | 890 | $ hg log -r 8:10 -T '{rev}:{node|shortest}\n' --hidden |
|
849 | 891 | 8:c5625 |
|
850 | 892 | 9:c5623 |
|
851 | 893 | 10:c562d |
|
852 | 894 | |
|
853 | 895 | $ cd .. |
|
854 | 896 | |
|
855 | 897 | Test pad function |
|
856 | 898 | |
|
857 | 899 | $ cd r |
|
858 | 900 | |
|
859 | 901 | $ hg log --template '{pad(rev, 20)} {author|user}\n' |
|
860 | 902 | 2 test |
|
861 | 903 | 1 {node|short} |
|
862 | 904 | 0 test |
|
863 | 905 | |
|
864 | 906 | $ hg log --template '{pad(rev, 20, " ", True)} {author|user}\n' |
|
865 | 907 | 2 test |
|
866 | 908 | 1 {node|short} |
|
867 | 909 | 0 test |
|
868 | 910 | |
|
869 | 911 | $ hg log --template '{pad(rev, 20, "-", False)} {author|user}\n' |
|
870 | 912 | 2------------------- test |
|
871 | 913 | 1------------------- {node|short} |
|
872 | 914 | 0------------------- test |
|
873 | 915 | |
|
874 | 916 | Test template string in pad function |
|
875 | 917 | |
|
876 | 918 | $ hg log -r 0 -T '{pad("\{{rev}}", 10)} {author|user}\n' |
|
877 | 919 | {0} test |
|
878 | 920 | |
|
879 | 921 | $ hg log -r 0 -T '{pad(r"\{rev}", 10)} {author|user}\n' |
|
880 | 922 | \{rev} test |
|
881 | 923 | |
|
882 | 924 | Test width argument passed to pad function |
|
883 | 925 | |
|
884 | 926 | $ hg log -r 0 -T '{pad(rev, "1{"0"}")} {author|user}\n' |
|
885 | 927 | 0 test |
|
886 | 928 | $ hg log -r 0 -T '{pad(rev, "not an int")}\n' |
|
887 | 929 | hg: parse error: pad() expects an integer width |
|
888 | 930 | [255] |
|
889 | 931 | |
|
890 | 932 | Test invalid fillchar passed to pad function |
|
891 | 933 | |
|
892 | 934 | $ hg log -r 0 -T '{pad(rev, 10, "")}\n' |
|
893 | 935 | hg: parse error: pad() expects a single fill character |
|
894 | 936 | [255] |
|
895 | 937 | $ hg log -r 0 -T '{pad(rev, 10, "--")}\n' |
|
896 | 938 | hg: parse error: pad() expects a single fill character |
|
897 | 939 | [255] |
|
898 | 940 | |
|
899 | 941 | Test boolean argument passed to pad function |
|
900 | 942 | |
|
901 | 943 | no crash |
|
902 | 944 | |
|
903 | 945 | $ hg log -r 0 -T '{pad(rev, 10, "-", "f{"alse"}")}\n' |
|
904 | 946 | ---------0 |
|
905 | 947 | |
|
906 | 948 | string/literal |
|
907 | 949 | |
|
908 | 950 | $ hg log -r 0 -T '{pad(rev, 10, "-", "false")}\n' |
|
909 | 951 | ---------0 |
|
910 | 952 | $ hg log -r 0 -T '{pad(rev, 10, "-", false)}\n' |
|
911 | 953 | 0--------- |
|
912 | 954 | $ hg log -r 0 -T '{pad(rev, 10, "-", "")}\n' |
|
913 | 955 | 0--------- |
|
914 | 956 | |
|
915 | 957 | unknown keyword is evaluated to '' |
|
916 | 958 | |
|
917 | 959 | $ hg log -r 0 -T '{pad(rev, 10, "-", unknownkeyword)}\n' |
|
918 | 960 | 0--------- |
|
919 | 961 | |
|
920 | 962 | Test separate function |
|
921 | 963 | |
|
922 | 964 | $ hg log -r 0 -T '{separate("-", "", "a", "b", "", "", "c", "")}\n' |
|
923 | 965 | a-b-c |
|
924 | 966 | $ hg log -r 0 -T '{separate(" ", "{rev}:{node|short}", author|user, branch)}\n' |
|
925 | 967 | 0:f7769ec2ab97 test default |
|
926 | 968 | $ hg log -r 0 --color=always -T '{separate(" ", "a", label(red, "b"), "c", label(red, ""), "d")}\n' |
|
927 | 969 | a \x1b[0;31mb\x1b[0m c d (esc) |
|
928 | 970 | |
|
929 | 971 | Test boolean expression/literal passed to if function |
|
930 | 972 | |
|
931 | 973 | $ hg log -r 0 -T '{if(rev, "rev 0 is True")}\n' |
|
932 | 974 | rev 0 is True |
|
933 | 975 | $ hg log -r 0 -T '{if(0, "literal 0 is True as well")}\n' |
|
934 | 976 | literal 0 is True as well |
|
935 | 977 | $ hg log -r 0 -T '{if(min(revset(r"0")), "0 of hybriditem is also True")}\n' |
|
936 | 978 | 0 of hybriditem is also True |
|
937 | 979 | $ hg log -r 0 -T '{if("", "", "empty string is False")}\n' |
|
938 | 980 | empty string is False |
|
939 | 981 | $ hg log -r 0 -T '{if(revset(r"0 - 0"), "", "empty list is False")}\n' |
|
940 | 982 | empty list is False |
|
941 | 983 | $ hg log -r 0 -T '{if(revset(r"0"), "non-empty list is True")}\n' |
|
942 | 984 | non-empty list is True |
|
943 | 985 | $ hg log -r 0 -T '{if(revset(r"0") % "", "list of empty strings is True")}\n' |
|
944 | 986 | list of empty strings is True |
|
945 | 987 | $ hg log -r 0 -T '{if(true, "true is True")}\n' |
|
946 | 988 | true is True |
|
947 | 989 | $ hg log -r 0 -T '{if(false, "", "false is False")}\n' |
|
948 | 990 | false is False |
|
949 | 991 | $ hg log -r 0 -T '{if("false", "non-empty string is True")}\n' |
|
950 | 992 | non-empty string is True |
|
951 | 993 | |
|
952 | 994 | Test ifcontains function |
|
953 | 995 | |
|
954 | 996 | $ hg log --template '{rev} {ifcontains(rev, "2 two 0", "is in the string", "is not")}\n' |
|
955 | 997 | 2 is in the string |
|
956 | 998 | 1 is not |
|
957 | 999 | 0 is in the string |
|
958 | 1000 | |
|
959 | 1001 | $ hg log -T '{rev} {ifcontains(rev, "2 two{" 0"}", "is in the string", "is not")}\n' |
|
960 | 1002 | 2 is in the string |
|
961 | 1003 | 1 is not |
|
962 | 1004 | 0 is in the string |
|
963 | 1005 | |
|
964 | 1006 | $ hg log --template '{rev} {ifcontains("a", file_adds, "added a", "did not add a")}\n' |
|
965 | 1007 | 2 did not add a |
|
966 | 1008 | 1 did not add a |
|
967 | 1009 | 0 added a |
|
968 | 1010 | |
|
969 | 1011 | $ hg log --debug -T '{rev}{ifcontains(1, parents, " is parent of 1")}\n' |
|
970 | 1012 | 2 is parent of 1 |
|
971 | 1013 | 1 |
|
972 | 1014 | 0 |
|
973 | 1015 | |
|
974 | 1016 | $ hg log -l1 -T '{ifcontains("branch", extras, "t", "f")}\n' |
|
975 | 1017 | t |
|
976 | 1018 | $ hg log -l1 -T '{ifcontains("branch", extras % "{key}", "t", "f")}\n' |
|
977 | 1019 | t |
|
978 | 1020 | $ hg log -l1 -T '{ifcontains("branc", extras % "{key}", "t", "f")}\n' |
|
979 | 1021 | f |
|
980 | 1022 | $ hg log -l1 -T '{ifcontains("branc", stringify(extras % "{key}"), "t", "f")}\n' |
|
981 | 1023 | t |
|
982 | 1024 | |
|
983 | 1025 | Test revset function |
|
984 | 1026 | |
|
985 | 1027 | $ hg log --template '{rev} {ifcontains(rev, revset("."), "current rev", "not current rev")}\n' |
|
986 | 1028 | 2 current rev |
|
987 | 1029 | 1 not current rev |
|
988 | 1030 | 0 not current rev |
|
989 | 1031 | |
|
990 | 1032 | $ hg log --template '{rev} {ifcontains(rev, revset(". + .^"), "match rev", "not match rev")}\n' |
|
991 | 1033 | 2 match rev |
|
992 | 1034 | 1 match rev |
|
993 | 1035 | 0 not match rev |
|
994 | 1036 | |
|
995 | 1037 | $ hg log -T '{ifcontains(desc, revset(":"), "", "type not match")}\n' -l1 |
|
996 | 1038 | type not match |
|
997 | 1039 | |
|
998 | 1040 | $ hg log --template '{rev} Parents: {revset("parents(%s)", rev)}\n' |
|
999 | 1041 | 2 Parents: 1 |
|
1000 | 1042 | 1 Parents: 0 |
|
1001 | 1043 | 0 Parents: |
|
1002 | 1044 | |
|
1003 | 1045 | $ cat >> .hg/hgrc <<EOF |
|
1004 | 1046 | > [revsetalias] |
|
1005 | 1047 | > myparents(\$1) = parents(\$1) |
|
1006 | 1048 | > EOF |
|
1007 | 1049 | $ hg log --template '{rev} Parents: {revset("myparents(%s)", rev)}\n' |
|
1008 | 1050 | 2 Parents: 1 |
|
1009 | 1051 | 1 Parents: 0 |
|
1010 | 1052 | 0 Parents: |
|
1011 | 1053 | |
|
1012 | 1054 | $ hg log --template 'Rev: {rev}\n{revset("::%s", rev) % "Ancestor: {revision}\n"}\n' |
|
1013 | 1055 | Rev: 2 |
|
1014 | 1056 | Ancestor: 0 |
|
1015 | 1057 | Ancestor: 1 |
|
1016 | 1058 | Ancestor: 2 |
|
1017 | 1059 | |
|
1018 | 1060 | Rev: 1 |
|
1019 | 1061 | Ancestor: 0 |
|
1020 | 1062 | Ancestor: 1 |
|
1021 | 1063 | |
|
1022 | 1064 | Rev: 0 |
|
1023 | 1065 | Ancestor: 0 |
|
1024 | 1066 | |
|
1025 | 1067 | $ hg log --template '{revset("TIP"|lower)}\n' -l1 |
|
1026 | 1068 | 2 |
|
1027 | 1069 | |
|
1028 | 1070 | $ hg log -T '{revset("%s", "t{"ip"}")}\n' -l1 |
|
1029 | 1071 | 2 |
|
1030 | 1072 | |
|
1031 | 1073 | a list template is evaluated for each item of revset/parents |
|
1032 | 1074 | |
|
1033 | 1075 | $ hg log -T '{rev} p: {revset("p1(%s)", rev) % "{rev}:{node|short}"}\n' |
|
1034 | 1076 | 2 p: 1:bcc7ff960b8e |
|
1035 | 1077 | 1 p: 0:f7769ec2ab97 |
|
1036 | 1078 | 0 p: |
|
1037 | 1079 | |
|
1038 | 1080 | $ hg log --debug -T '{rev} p:{parents % " {rev}:{node|short}"}\n' |
|
1039 | 1081 | 2 p: 1:bcc7ff960b8e -1:000000000000 |
|
1040 | 1082 | 1 p: 0:f7769ec2ab97 -1:000000000000 |
|
1041 | 1083 | 0 p: -1:000000000000 -1:000000000000 |
|
1042 | 1084 | |
|
1043 | 1085 | therefore, 'revcache' should be recreated for each rev |
|
1044 | 1086 | |
|
1045 | 1087 | $ hg log -T '{rev} {file_adds}\np {revset("p1(%s)", rev) % "{file_adds}"}\n' |
|
1046 | 1088 | 2 aa b |
|
1047 | 1089 | p |
|
1048 | 1090 | 1 |
|
1049 | 1091 | p a |
|
1050 | 1092 | 0 a |
|
1051 | 1093 | p |
|
1052 | 1094 | |
|
1053 | 1095 | $ hg log --debug -T '{rev} {file_adds}\np {parents % "{file_adds}"}\n' |
|
1054 | 1096 | 2 aa b |
|
1055 | 1097 | p |
|
1056 | 1098 | 1 |
|
1057 | 1099 | p a |
|
1058 | 1100 | 0 a |
|
1059 | 1101 | p |
|
1060 | 1102 | |
|
1061 | 1103 | a revset item must be evaluated as an integer revision, not an offset from tip |
|
1062 | 1104 | |
|
1063 | 1105 | $ hg log -l 1 -T '{revset("null") % "{rev}:{node|short}"}\n' |
|
1064 | 1106 | -1:000000000000 |
|
1065 | 1107 | $ hg log -l 1 -T '{revset("%s", "null") % "{rev}:{node|short}"}\n' |
|
1066 | 1108 | -1:000000000000 |
|
1067 | 1109 | |
|
1068 | 1110 | join() should pick '{rev}' from revset items: |
|
1069 | 1111 | |
|
1070 | 1112 | $ hg log -R ../a -T '{join(revset("parents(%d)", rev), ", ")}\n' -r6 |
|
1071 | 1113 | 4, 5 |
|
1072 | 1114 | |
|
1073 | 1115 | on the other hand, parents are formatted as '{rev}:{node|formatnode}' by |
|
1074 | 1116 | default. join() should agree with the default formatting: |
|
1075 | 1117 | |
|
1076 | 1118 | $ hg log -R ../a -T '{join(parents, ", ")}\n' -r6 |
|
1077 | 1119 | 5:13207e5a10d9, 4:bbe44766e73d |
|
1078 | 1120 | |
|
1079 | 1121 | $ hg log -R ../a -T '{join(parents, ",\n")}\n' -r6 --debug |
|
1080 | 1122 | 5:13207e5a10d9fd28ec424934298e176197f2c67f, |
|
1081 | 1123 | 4:bbe44766e73d5f11ed2177f1838de10c53ef3e74 |
|
1082 | 1124 | |
|
1083 | 1125 | Invalid arguments passed to revset() |
|
1084 | 1126 | |
|
1085 | 1127 | $ hg log -T '{revset("%whatever", 0)}\n' |
|
1086 | 1128 | hg: parse error: unexpected revspec format character w |
|
1087 | 1129 | [255] |
|
1088 | 1130 | $ hg log -T '{revset("%lwhatever", files)}\n' |
|
1089 | 1131 | hg: parse error: unexpected revspec format character w |
|
1090 | 1132 | [255] |
|
1091 | 1133 | $ hg log -T '{revset("%s %s", 0)}\n' |
|
1092 | 1134 | hg: parse error: missing argument for revspec |
|
1093 | 1135 | [255] |
|
1094 | 1136 | $ hg log -T '{revset("", 0)}\n' |
|
1095 | 1137 | hg: parse error: too many revspec arguments specified |
|
1096 | 1138 | [255] |
|
1097 | 1139 | $ hg log -T '{revset("%s", 0, 1)}\n' |
|
1098 | 1140 | hg: parse error: too many revspec arguments specified |
|
1099 | 1141 | [255] |
|
1100 | 1142 | $ hg log -T '{revset("%", 0)}\n' |
|
1101 | 1143 | hg: parse error: incomplete revspec format character |
|
1102 | 1144 | [255] |
|
1103 | 1145 | $ hg log -T '{revset("%l", 0)}\n' |
|
1104 | 1146 | hg: parse error: incomplete revspec format character |
|
1105 | 1147 | [255] |
|
1106 | 1148 | $ hg log -T '{revset("%d", 'foo')}\n' |
|
1107 | 1149 | hg: parse error: invalid argument for revspec |
|
1108 | 1150 | [255] |
|
1109 | 1151 | $ hg log -T '{revset("%ld", files)}\n' |
|
1110 | 1152 | hg: parse error: invalid argument for revspec |
|
1111 | 1153 | [255] |
|
1112 | 1154 | $ hg log -T '{revset("%ls", 0)}\n' |
|
1113 | 1155 | hg: parse error: invalid argument for revspec |
|
1114 | 1156 | [255] |
|
1115 | 1157 | $ hg log -T '{revset("%b", 'foo')}\n' |
|
1116 | 1158 | hg: parse error: invalid argument for revspec |
|
1117 | 1159 | [255] |
|
1118 | 1160 | $ hg log -T '{revset("%lb", files)}\n' |
|
1119 | 1161 | hg: parse error: invalid argument for revspec |
|
1120 | 1162 | [255] |
|
1121 | 1163 | $ hg log -T '{revset("%r", 0)}\n' |
|
1122 | 1164 | hg: parse error: invalid argument for revspec |
|
1123 | 1165 | [255] |
|
1124 | 1166 | |
|
1125 | 1167 | Test files function |
|
1126 | 1168 | |
|
1127 | 1169 | $ hg log -T "{rev}\n{join(files('*'), '\n')}\n" |
|
1128 | 1170 | 2 |
|
1129 | 1171 | a |
|
1130 | 1172 | aa |
|
1131 | 1173 | b |
|
1132 | 1174 | 1 |
|
1133 | 1175 | a |
|
1134 | 1176 | 0 |
|
1135 | 1177 | a |
|
1136 | 1178 | |
|
1137 | 1179 | $ hg log -T "{rev}\n{join(files('aa'), '\n')}\n" |
|
1138 | 1180 | 2 |
|
1139 | 1181 | aa |
|
1140 | 1182 | 1 |
|
1141 | 1183 | |
|
1142 | 1184 | 0 |
|
1143 | 1185 | |
|
1144 | 1186 | $ hg rm a |
|
1145 | 1187 | $ hg log -r "wdir()" -T "{rev}\n{join(files('*'), '\n')}\n" |
|
1146 | 1188 | 2147483647 |
|
1147 | 1189 | aa |
|
1148 | 1190 | b |
|
1149 | 1191 | $ hg revert a |
|
1150 | 1192 | |
|
1151 | 1193 | Test relpath function |
|
1152 | 1194 | |
|
1153 | 1195 | $ hg log -r0 -T '{files % "{file|relpath}\n"}' |
|
1154 | 1196 | a |
|
1155 | 1197 | $ cd .. |
|
1156 | 1198 | $ hg log -R r -r0 -T '{files % "{file|relpath}\n"}' |
|
1157 | 1199 | r/a |
|
1158 | 1200 | |
|
1159 | 1201 | Test stringify on sub expressions |
|
1160 | 1202 | |
|
1161 | 1203 | $ hg log -R a -r 8 --template '{join(files, if("1", if("1", ", ")))}\n' |
|
1162 | 1204 | fourth, second, third |
|
1163 | 1205 | $ hg log -R a -r 8 --template '{strip(if("1", if("1", "-abc-")), if("1", if("1", "-")))}\n' |
|
1164 | 1206 | abc |
|
1165 | 1207 | |
|
1166 | 1208 | Test splitlines |
|
1167 | 1209 | |
|
1168 | 1210 | $ hg log -Gv -R a --template "{splitlines(desc) % 'foo {line}\n'}" |
|
1169 | 1211 | @ foo Modify, add, remove, rename |
|
1170 | 1212 | | |
|
1171 | 1213 | o foo future |
|
1172 | 1214 | | |
|
1173 | 1215 | o foo third |
|
1174 | 1216 | | |
|
1175 | 1217 | o foo second |
|
1176 | 1218 | |
|
1177 | 1219 | o foo merge |
|
1178 | 1220 | |\ |
|
1179 | 1221 | | o foo new head |
|
1180 | 1222 | | | |
|
1181 | 1223 | o | foo new branch |
|
1182 | 1224 | |/ |
|
1183 | 1225 | o foo no user, no domain |
|
1184 | 1226 | | |
|
1185 | 1227 | o foo no person |
|
1186 | 1228 | | |
|
1187 | 1229 | o foo other 1 |
|
1188 | 1230 | | foo other 2 |
|
1189 | 1231 | | foo |
|
1190 | 1232 | | foo other 3 |
|
1191 | 1233 | o foo line 1 |
|
1192 | 1234 | foo line 2 |
|
1193 | 1235 | |
|
1194 | 1236 | $ hg log -R a -r0 -T '{desc|splitlines}\n' |
|
1195 | 1237 | line 1 line 2 |
|
1196 | 1238 | $ hg log -R a -r0 -T '{join(desc|splitlines, "|")}\n' |
|
1197 | 1239 | line 1|line 2 |
|
1198 | 1240 | |
|
1199 | 1241 | Test startswith |
|
1200 | 1242 | $ hg log -Gv -R a --template "{startswith(desc)}" |
|
1201 | 1243 | hg: parse error: startswith expects two arguments |
|
1202 | 1244 | [255] |
|
1203 | 1245 | |
|
1204 | 1246 | $ hg log -Gv -R a --template "{startswith('line', desc)}" |
|
1205 | 1247 | @ |
|
1206 | 1248 | | |
|
1207 | 1249 | o |
|
1208 | 1250 | | |
|
1209 | 1251 | o |
|
1210 | 1252 | | |
|
1211 | 1253 | o |
|
1212 | 1254 | |
|
1213 | 1255 | o |
|
1214 | 1256 | |\ |
|
1215 | 1257 | | o |
|
1216 | 1258 | | | |
|
1217 | 1259 | o | |
|
1218 | 1260 | |/ |
|
1219 | 1261 | o |
|
1220 | 1262 | | |
|
1221 | 1263 | o |
|
1222 | 1264 | | |
|
1223 | 1265 | o |
|
1224 | 1266 | | |
|
1225 | 1267 | o line 1 |
|
1226 | 1268 | line 2 |
|
1227 | 1269 | |
|
1228 | 1270 | Test word function (including index out of bounds graceful failure) |
|
1229 | 1271 | |
|
1230 | 1272 | $ hg log -Gv -R a --template "{word('1', desc)}" |
|
1231 | 1273 | @ add, |
|
1232 | 1274 | | |
|
1233 | 1275 | o |
|
1234 | 1276 | | |
|
1235 | 1277 | o |
|
1236 | 1278 | | |
|
1237 | 1279 | o |
|
1238 | 1280 | |
|
1239 | 1281 | o |
|
1240 | 1282 | |\ |
|
1241 | 1283 | | o head |
|
1242 | 1284 | | | |
|
1243 | 1285 | o | branch |
|
1244 | 1286 | |/ |
|
1245 | 1287 | o user, |
|
1246 | 1288 | | |
|
1247 | 1289 | o person |
|
1248 | 1290 | | |
|
1249 | 1291 | o 1 |
|
1250 | 1292 | | |
|
1251 | 1293 | o 1 |
|
1252 | 1294 | |
|
1253 | 1295 | |
|
1254 | 1296 | Test word third parameter used as splitter |
|
1255 | 1297 | |
|
1256 | 1298 | $ hg log -Gv -R a --template "{word('0', desc, 'o')}" |
|
1257 | 1299 | @ M |
|
1258 | 1300 | | |
|
1259 | 1301 | o future |
|
1260 | 1302 | | |
|
1261 | 1303 | o third |
|
1262 | 1304 | | |
|
1263 | 1305 | o sec |
|
1264 | 1306 | |
|
1265 | 1307 | o merge |
|
1266 | 1308 | |\ |
|
1267 | 1309 | | o new head |
|
1268 | 1310 | | | |
|
1269 | 1311 | o | new branch |
|
1270 | 1312 | |/ |
|
1271 | 1313 | o n |
|
1272 | 1314 | | |
|
1273 | 1315 | o n |
|
1274 | 1316 | | |
|
1275 | 1317 | o |
|
1276 | 1318 | | |
|
1277 | 1319 | o line 1 |
|
1278 | 1320 | line 2 |
|
1279 | 1321 | |
|
1280 | 1322 | Test word error messages for not enough and too many arguments |
|
1281 | 1323 | |
|
1282 | 1324 | $ hg log -Gv -R a --template "{word('0')}" |
|
1283 | 1325 | hg: parse error: word expects two or three arguments, got 1 |
|
1284 | 1326 | [255] |
|
1285 | 1327 | |
|
1286 | 1328 | $ hg log -Gv -R a --template "{word('0', desc, 'o', 'h', 'b', 'o', 'y')}" |
|
1287 | 1329 | hg: parse error: word expects two or three arguments, got 7 |
|
1288 | 1330 | [255] |
|
1289 | 1331 | |
|
1290 | 1332 | Test word for integer literal |
|
1291 | 1333 | |
|
1292 | 1334 | $ hg log -R a --template "{word(2, desc)}\n" -r0 |
|
1293 | 1335 | line |
|
1294 | 1336 | |
|
1295 | 1337 | Test word for invalid numbers |
|
1296 | 1338 | |
|
1297 | 1339 | $ hg log -Gv -R a --template "{word('a', desc)}" |
|
1298 | 1340 | hg: parse error: word expects an integer index |
|
1299 | 1341 | [255] |
|
1300 | 1342 | |
|
1301 | 1343 | Test word for out of range |
|
1302 | 1344 | |
|
1303 | 1345 | $ hg log -R a --template "{word(10000, desc)}" |
|
1304 | 1346 | $ hg log -R a --template "{word(-10000, desc)}" |
|
1305 | 1347 | |
|
1306 | 1348 | Test indent and not adding to empty lines |
|
1307 | 1349 | |
|
1308 | 1350 | $ hg log -T "-----\n{indent(desc, '>> ', ' > ')}\n" -r 0:1 -R a |
|
1309 | 1351 | ----- |
|
1310 | 1352 | > line 1 |
|
1311 | 1353 | >> line 2 |
|
1312 | 1354 | ----- |
|
1313 | 1355 | > other 1 |
|
1314 | 1356 | >> other 2 |
|
1315 | 1357 | |
|
1316 | 1358 | >> other 3 |
|
1317 | 1359 | |
|
1318 | 1360 | Test with non-strings like dates |
|
1319 | 1361 | |
|
1320 | 1362 | $ hg log -T "{indent(date, ' ')}\n" -r 2:3 -R a |
|
1321 | 1363 | 1200000.00 |
|
1322 | 1364 | 1300000.00 |
|
1323 | 1365 | |
|
1324 | 1366 | json filter should escape HTML tags so that the output can be embedded in hgweb: |
|
1325 | 1367 | |
|
1326 | 1368 | $ hg log -T "{'<foo@example.org>'|json}\n" -R a -l1 |
|
1327 | 1369 | "\u003cfoo@example.org\u003e" |
|
1328 | 1370 | |
|
1329 | 1371 | Set up repository for non-ascii encoding tests: |
|
1330 | 1372 | |
|
1331 | 1373 | $ hg init nonascii |
|
1332 | 1374 | $ cd nonascii |
|
1333 | 1375 | $ $PYTHON <<EOF |
|
1334 | 1376 | > open('latin1', 'wb').write(b'\xe9') |
|
1335 | 1377 | > open('utf-8', 'wb').write(b'\xc3\xa9') |
|
1336 | 1378 | > EOF |
|
1337 | 1379 | $ HGENCODING=utf-8 hg branch -q `cat utf-8` |
|
1338 | 1380 | $ HGENCODING=utf-8 hg ci -qAm "non-ascii branch: `cat utf-8`" utf-8 |
|
1339 | 1381 | |
|
1340 | 1382 | json filter should try round-trip conversion to utf-8: |
|
1341 | 1383 | |
|
1342 | 1384 | $ HGENCODING=ascii hg log -T "{branch|json}\n" -r0 |
|
1343 | 1385 | "\u00e9" |
|
1344 | 1386 | $ HGENCODING=ascii hg log -T "{desc|json}\n" -r0 |
|
1345 | 1387 | "non-ascii branch: \u00e9" |
|
1346 | 1388 | |
|
1347 | 1389 | json filter should take input as utf-8 if it was converted from utf-8: |
|
1348 | 1390 | |
|
1349 | 1391 | $ HGENCODING=latin-1 hg log -T "{branch|json}\n" -r0 |
|
1350 | 1392 | "\u00e9" |
|
1351 | 1393 | $ HGENCODING=latin-1 hg log -T "{desc|json}\n" -r0 |
|
1352 | 1394 | "non-ascii branch: \u00e9" |
|
1353 | 1395 | |
|
1354 | 1396 | json filter takes input as utf-8b: |
|
1355 | 1397 | |
|
1356 | 1398 | $ HGENCODING=ascii hg log -T "{'`cat utf-8`'|json}\n" -l1 |
|
1357 | 1399 | "\u00e9" |
|
1358 | 1400 | $ HGENCODING=ascii hg log -T "{'`cat latin1`'|json}\n" -l1 |
|
1359 | 1401 | "\udce9" |
|
1360 | 1402 | |
|
1361 | 1403 | utf8 filter: |
|
1362 | 1404 | |
|
1363 | 1405 | $ HGENCODING=ascii hg log -T "round-trip: {branch|utf8|hex}\n" -r0 |
|
1364 | 1406 | round-trip: c3a9 |
|
1365 | 1407 | $ HGENCODING=latin1 hg log -T "decoded: {'`cat latin1`'|utf8|hex}\n" -l1 |
|
1366 | 1408 | decoded: c3a9 |
|
1367 | 1409 | $ HGENCODING=ascii hg log -T "replaced: {'`cat latin1`'|utf8|hex}\n" -l1 |
|
1368 | 1410 | abort: decoding near * (glob) |
|
1369 | 1411 | [255] |
|
1370 | 1412 | $ hg log -T "coerced to string: {rev|utf8}\n" -r0 |
|
1371 | 1413 | coerced to string: 0 |
|
1372 | 1414 | |
|
1373 | 1415 | pad width: |
|
1374 | 1416 | |
|
1375 | 1417 | $ HGENCODING=utf-8 hg debugtemplate "{pad('`cat utf-8`', 2, '-')}\n" |
|
1376 | 1418 | \xc3\xa9- (esc) |
|
1377 | 1419 | |
|
1378 | 1420 | $ cd .. |