##// END OF EJS Templates
scmutil: make shortesthexnodeidprefix() use unfiltered repo...
Martin von Zweigbergk -
r37726:8e854161 default
parent child Browse files
Show More
@@ -1,472 +1,470 b''
1 1 # show.py - Extension implementing `hg show`
2 2 #
3 3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """unified command to show various repository information (EXPERIMENTAL)
9 9
10 10 This extension provides the :hg:`show` command, which provides a central
11 11 command for displaying commonly-accessed repository data and views of that
12 12 data.
13 13
14 14 The following config options can influence operation.
15 15
16 16 ``commands``
17 17 ------------
18 18
19 19 ``show.aliasprefix``
20 20 List of strings that will register aliases for views. e.g. ``s`` will
21 21 effectively set config options ``alias.s<view> = show <view>`` for all
22 22 views. i.e. `hg swork` would execute `hg show work`.
23 23
24 24 Aliases that would conflict with existing registrations will not be
25 25 performed.
26 26 """
27 27
28 28 from __future__ import absolute_import
29 29
30 30 from mercurial.i18n import _
31 31 from mercurial.node import (
32 32 hex,
33 33 nullrev,
34 34 )
35 35 from mercurial import (
36 36 cmdutil,
37 37 commands,
38 38 destutil,
39 39 error,
40 40 formatter,
41 41 graphmod,
42 42 logcmdutil,
43 43 phases,
44 44 pycompat,
45 45 registrar,
46 46 revset,
47 47 revsetlang,
48 48 scmutil,
49 49 )
50 50
51 51 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
52 52 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
53 53 # be specifying the version(s) of Mercurial they are tested with, or
54 54 # leave the attribute unspecified.
55 55 testedwith = 'ships-with-hg-core'
56 56
57 57 cmdtable = {}
58 58 command = registrar.command(cmdtable)
59 59
60 60 revsetpredicate = registrar.revsetpredicate()
61 61
class showcmdfunc(registrar._funcregistrarbase):
    """Register a function to be invoked for an `hg show <thing>`."""

    # Used by _formatdoc() in the registrar base class to render the
    # "<name> -- <docstring>" entries listed in `hg show` help output.
    _docformat = '%s -- %s'

    def _extrasetup(self, name, func, fmtopic=None, csettopic=None):
        """Called with decorator arguments to register a show view.

        ``name`` is the sub-command name.

        ``func`` is the function being decorated.

        ``fmtopic`` is the topic in the style that will be rendered for
        this view.

        ``csettopic`` is the topic in the style to be used for a changeset
        printer.

        If ``fmtopic`` is specified, the view function will receive a
        formatter instance. If ``csettopic`` is specified, the view
        function will receive a changeset printer.
        """
        # Stash the topics directly on the view function; show() reads
        # these attributes to decide which renderer to construct.
        func._fmtopic = fmtopic
        func._csettopic = csettopic
88 88 showview = showcmdfunc()
89 89
@command('show', [
    # TODO: Switch this template flag to use cmdutil.formatteropts if
    # 'hg show' becomes stable before --template/-T is stable. For now,
    # we are putting it here without the '(EXPERIMENTAL)' flag because it
    # is an important part of the 'hg show' user experience and the entire
    # 'hg show' experience is experimental.
    ('T', 'template', '', ('display with template'), _('TEMPLATE')),
    ], _('VIEW'))
def show(ui, repo, view=None, template=None):
    """show various repository information

    A requested view of repository data is displayed.

    If no view is requested, the list of available views is shown and the
    command aborts.

    .. note::

       There are no backwards compatibility guarantees for the output of this
       command. Output may change in any future Mercurial release.

       Consumers wanting stable command output should specify a template via
       ``-T/--template``.

    List of available views:
    """
    # Plain mode is for scripts; without an explicit template the output
    # format would be unstable, so refuse to proceed.
    if ui.plain() and not template:
        hint = _('invoke with -T/--template to control output format')
        raise error.Abort(_('must specify a template in plain mode'), hint=hint)

    views = showview._table

    # No view requested: print the registry of available views and abort.
    if not view:
        ui.pager('show')
        # TODO consider using formatter here so available views can be
        # rendered to custom format.
        ui.write(_('available views:\n'))
        ui.write('\n')

        for name, func in sorted(views.items()):
            ui.write(('%s\n') % pycompat.sysbytes(func.__doc__))

        ui.write('\n')
        raise error.Abort(_('no view requested'),
                          hint=_('use "hg show VIEW" to choose a view'))

    # TODO use same logic as dispatch to perform prefix matching.
    if view not in views:
        raise error.Abort(_('unknown view: %s') % view,
                          hint=_('run "hg show" to see available views'))

    template = template or 'show'

    fn = views[view]
    ui.pager('show')

    # Dispatch according to how the view was registered (see
    # showcmdfunc._extrasetup): formatter-based, changeset-printer-based,
    # or plain.
    if fn._fmtopic:
        fmtopic = 'show%s' % fn._fmtopic
        with ui.formatter(fmtopic, {'template': template}) as fm:
            return fn(ui, repo, fm)
    elif fn._csettopic:
        ref = 'show%s' % fn._csettopic
        spec = formatter.lookuptemplate(ui, ref, template)
        displayer = logcmdutil.changesettemplater(ui, repo, spec, buffered=True)
        return fn(ui, repo, displayer)
    else:
        return fn(ui, repo)
157 157
@showview('bookmarks', fmtopic='bookmarks')
def showbookmarks(ui, repo, fm):
    """bookmarks and their associated changeset"""
    marks = repo._bookmarks
    if not len(marks):
        # This is a bit hacky. Ideally, templates would have a way to
        # specify an empty output, but we shouldn't corrupt JSON while
        # waiting for this functionality.
        if not isinstance(fm, formatter.jsonformatter):
            ui.write(_('(no bookmarks set)\n'))
        return

    revs = [repo[node].rev() for node in marks.values()]
    active = repo._activebookmark
    # Longest name and shortest unique node length are emitted as data so
    # the template can align columns.
    longestname = max(len(b) for b in marks)
    nodelen = longestshortest(repo, revs)

    for bm, node in sorted(marks.items()):
        fm.startitem()
        fm.context(ctx=repo[node])
        fm.write('bookmark', '%s', bm)
        fm.write('node', fm.hexfunc(node), fm.hexfunc(node))
        fm.data(active=bm == active,
                longestbookmarklen=longestname,
                nodelen=nodelen)
183 183
@showview('stack', csettopic='stack')
def showstack(ui, repo, displayer):
    """current line of work"""
    # NOTE(review): the exact whitespace inside the ui.write() graph-drawing
    # strings below came through a lossy paste; confirm alignment against
    # upstream hgext/show.py before relying on the rendered layout.
    wdirctx = repo['.']
    if wdirctx.rev() == nullrev:
        raise error.Abort(_('stack view only available when there is a '
                            'working directory'))

    if wdirctx.phase() == phases.public:
        ui.write(_('(empty stack; working directory parent is a published '
                   'changeset)\n'))
        return

    # TODO extract "find stack" into a function to facilitate
    # customization and reuse.

    baserev = destutil.stackbase(ui, repo)
    basectx = None

    if baserev is None:
        # No stack base found: the stack is just the working directory
        # parent itself.
        baserev = wdirctx.rev()
        stackrevs = {wdirctx.rev()}
    else:
        stackrevs = set(repo.revs('%d::.', baserev))

    ctx = repo[baserev]
    if ctx.p1().rev() != nullrev:
        basectx = ctx.p1()

    # And relevant descendants.
    branchpointattip = False
    cl = repo.changelog

    for rev in cl.descendants([wdirctx.rev()]):
        ctx = repo[rev]

        # Will only happen if . is public.
        if ctx.phase() == phases.public:
            break

        stackrevs.add(ctx.rev())

        # ctx.children() within a function iterating on descandants
        # potentially has severe performance concerns because revlog.children()
        # iterates over all revisions after ctx's node. However, the number of
        # draft changesets should be a reasonably small number. So even if
        # this is quadratic, the perf impact should be minimal.
        if len(ctx.children()) > 1:
            branchpointattip = True
            break

    stackrevs = list(sorted(stackrevs, reverse=True))

    # Find likely target heads for the current stack. These are likely
    # merge or rebase targets.
    if basectx:
        # TODO make this customizable?
        newheads = set(repo.revs('heads(%d::) - %ld - not public()',
                                 basectx.rev(), stackrevs))
    else:
        newheads = set()

    allrevs = set(stackrevs) | newheads | set([baserev])
    nodelen = longestshortest(repo, allrevs)

    try:
        cmdutil.findcmd('rebase', commands.table)
        haverebase = True
    except (error.AmbiguousCommand, error.UnknownCommand):
        haverebase = False

    # TODO use templating.
    # TODO consider using graphmod. But it may not be necessary given
    # our simplicity and the customizations required.
    # TODO use proper graph symbols from graphmod

    tres = formatter.templateresources(ui, repo)
    shortesttmpl = formatter.maketemplater(ui, '{shortest(node, %d)}' % nodelen,
                                           resources=tres)
    def shortest(ctx):
        # Render the shortest unique node prefix for a changeset.
        return shortesttmpl.renderdefault({'ctx': ctx, 'node': ctx.hex()})

    # We write out new heads to aid in DAG awareness and to help with decision
    # making on how the stack should be reconciled with commits made since the
    # branch point.
    if newheads:
        # Calculate distance from base so we can render the count and so we can
        # sort display order by commit distance.
        revdistance = {}
        for head in newheads:
            # There is some redundancy in DAG traversal here and therefore
            # room to optimize.
            ancestors = cl.ancestors([head], stoprev=basectx.rev())
            revdistance[head] = len(list(ancestors))

        sourcectx = repo[stackrevs[-1]]

        sortedheads = sorted(newheads, key=lambda x: revdistance[x],
                             reverse=True)

        for i, rev in enumerate(sortedheads):
            ctx = repo[rev]

            if i:
                ui.write(': ')
            else:
                ui.write(' ')

            ui.write(('o '))
            displayer.show(ctx, nodelen=nodelen)
            displayer.flush(ctx)
            ui.write('\n')

            if i:
                ui.write(':/')
            else:
                ui.write(' /')

            ui.write(' (')
            ui.write(_('%d commits ahead') % revdistance[rev],
                     label='stack.commitdistance')

            if haverebase:
                # TODO may be able to omit --source in some scenarios
                ui.write('; ')
                ui.write(('hg rebase --source %s --dest %s' % (
                    shortest(sourcectx), shortest(ctx))),
                    label='stack.rebasehint')

            ui.write(')\n')

    ui.write(':\n: ')
    ui.write(_('(stack head)\n'), label='stack.label')

    if branchpointattip:
        ui.write(' \\ / ')
        ui.write(_('(multiple children)\n'), label='stack.label')
        ui.write(' |\n')

    for rev in stackrevs:
        ctx = repo[rev]
        # '@' marks the working directory parent within the stack.
        symbol = '@' if rev == wdirctx.rev() else 'o'

        if newheads:
            ui.write(': ')
        else:
            ui.write(' ')

        ui.write(symbol, ' ')
        displayer.show(ctx, nodelen=nodelen)
        displayer.flush(ctx)
        ui.write('\n')

    # TODO display histedit hint?

    if basectx:
        # Vertically and horizontally separate stack base from parent
        # to reinforce stack boundary.
        if newheads:
            ui.write(':/ ')
        else:
            ui.write(' / ')

        ui.write(_('(stack base)'), '\n', label='stack.label')
        ui.write(('o '))

        displayer.show(basectx, nodelen=nodelen)
        displayer.flush(basectx)
        ui.write('\n')
353 353
@revsetpredicate('_underway([commitage[, headage]])')
def underwayrevset(repo, subset, x):
    # Internal revset backing `hg show work`: mutable changesets plus
    # contextual parents, recent non-closed heads, and the working
    # directory parent.
    args = revset.getargsdict(x, 'underway', 'commitage headage')
    if 'commitage' not in args:
        args['commitage'] = None
    if 'headage' not in args:
        args['headage'] = None

    # We assume callers of this revset add a topographical sort on the
    # result. This means there is no benefit to making the revset lazy
    # since the topographical sort needs to consume all revs.
    #
    # With this in mind, we build up the set manually instead of constructing
    # a complex revset. This enables faster execution.

    # Mutable changesets (non-public) are the most important changesets
    # to return. ``not public()`` will also pull in obsolete changesets if
    # there is a non-obsolete changeset with obsolete ancestors. This is
    # why we exclude obsolete changesets from this query.
    rs = 'not public() and not obsolete()'
    rsargs = []
    if args['commitage']:
        rs += ' and date(%s)'
        rsargs.append(revsetlang.getstring(args['commitage'],
                                           _('commitage requires a string')))

    mutable = repo.revs(rs, *rsargs)
    relevant = revset.baseset(mutable)

    # Add parents of mutable changesets to provide context.
    relevant += repo.revs('parents(%ld)', mutable)

    # We also pull in (public) heads if they a) aren't closing a branch
    # b) are recent.
    rs = 'head() and not closed()'
    rsargs = []
    if args['headage']:
        rs += ' and date(%s)'
        rsargs.append(revsetlang.getstring(args['headage'],
                                           _('headage requires a string')))

    relevant += repo.revs(rs, *rsargs)

    # Add working directory parent.
    wdirrev = repo['.'].rev()
    if wdirrev != nullrev:
        relevant += revset.baseset({wdirrev})

    return subset & relevant
403 403
@showview('work', csettopic='work')
def showwork(ui, repo, displayer):
    """changesets that aren't finished"""
    # TODO support date-based limiting when calling revset.
    unfinished = repo.revs('sort(_underway(), topo)')
    hashlen = longestshortest(repo, unfinished)

    # Render the selected revisions as a compact ASCII graph.
    ui.setconfig('experimental', 'graphshorten', True)
    dag = graphmod.dagwalker(repo, unfinished)
    logcmdutil.displaygraph(ui, repo, dag, displayer, graphmod.asciiedges,
                            props={'nodelen': hashlen})
416 416
def extsetup(ui):
    # Alias `hg <prefix><view>` to `hg show <view>`.
    for prefix in ui.configlist('commands', 'show.aliasprefix'):
        for view in showview._table:
            alias = '%s%s' % (prefix, view)

            matches, allcommands = cmdutil.findpossible(alias, commands.table,
                                                        strict=True)

            # Don't shadow an existing command name or a user alias.
            if alias in matches or ui.config('alias', alias, None):
                continue

            ui.setconfig('alias', alias, 'show %s' % view, source='show')
435 435
def longestshortest(repo, revs, minlen=4):
    """Return the length of the longest shortest node to identify revisions.

    The result of this function can be used with the ``shortest()`` template
    function to ensure that a value is unique and unambiguous for a given
    set of nodes.

    The number of revisions in the repo is taken into account to prevent
    a numeric node prefix from conflicting with an integer revision number.
    If we fail to do this, a value of e.g. ``10023`` could mean either
    revision 10023 or node ``10023abc...``.
    """
    # Empty input: fall back to the minimum requested width.
    if not revs:
        return minlen
    cl = repo.changelog
    # shortesthexnodeidprefix() internally uses the unfiltered repo, so no
    # explicit repo.unfiltered() is needed here.
    return max(len(scmutil.shortesthexnodeidprefix(repo, hex(cl.node(r)),
                                                   minlen)) for r in revs)
455 453
456 454 # Adjust the docstring of the show command so it shows all registered views.
457 455 # This is a bit hacky because it runs at the end of module load. When moved
458 456 # into core or when another extension wants to provide a view, we'll need
459 457 # to do this more robustly.
460 458 # TODO make this more robust.
def _updatedocstring():
    """Append the table of registered views to the show command docstring."""
    width = max(map(len, showview._table.keys()))
    rows = []
    for name in sorted(showview._table.keys()):
        rows.append(pycompat.sysstr(' %s %s' % (
            name.ljust(width), showview._table[name]._origdoc)))

    cmdtable['show'][0].__doc__ = pycompat.sysstr('%s\n\n%s\n ') % (
        cmdtable['show'][0].__doc__.rstrip(),
        pycompat.sysstr('\n\n').join(rows))

_updatedocstring()
@@ -1,1552 +1,1555 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import hashlib
13 13 import os
14 14 import re
15 15 import socket
16 16 import subprocess
17 17 import weakref
18 18
19 19 from .i18n import _
20 20 from .node import (
21 21 bin,
22 22 hex,
23 23 nullid,
24 24 short,
25 25 wdirid,
26 26 wdirrev,
27 27 )
28 28
29 29 from . import (
30 30 encoding,
31 31 error,
32 32 match as matchmod,
33 33 obsolete,
34 34 obsutil,
35 35 pathutil,
36 36 phases,
37 37 pycompat,
38 38 revsetlang,
39 39 similar,
40 40 url,
41 41 util,
42 42 vfs,
43 43 )
44 44
45 45 from .utils import (
46 46 procutil,
47 47 stringutil,
48 48 )
49 49
50 50 if pycompat.iswindows:
51 51 from . import scmwindows as scmplatform
52 52 else:
53 53 from . import scmposix as scmplatform
54 54
55 55 termsize = scmplatform.termsize
56 56
class status(tuple):
    '''Named tuple with a list of files per status. The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        # Pack all seven categories into the underlying tuple; the
        # properties below simply index into it.
        fields = (modified, added, removed, deleted, unknown, ignored,
                  clean)
        return tuple.__new__(cls, fields)

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                 'unknown=%r, ignored=%r, clean=%r>') % self)
109 109
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2

    Yields (subpath, subrepo) pairs. Note: uses dict.iteritems(), so this
    code is Python 2 only as written.
    """
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = set()

    # Subpaths present only in ctx2 are tracked separately and yielded as
    # null subrepos below.
    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
134 134
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    # Collect excluded nodes that are secret (and not extinct) so we can
    # tell the user why nothing was exchanged.
    secret = []
    for node in excluded or []:
        ctx = repo[node]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secret.append(node)

    if secret:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secret))
    else:
        ui.status(_("no changes found\n"))
151 151
def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.
    """
    try:
        try:
            return func()
        except: # re-raises
            # Record a traceback (when enabled) before the outer handlers
            # turn the exception into a message + exit code.
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        if inst.errno == errno.ETIMEDOUT:
            reason = _('timed out waiting for lock held by %r') % inst.locker
        else:
            reason = _('lock held by %r') % inst.locker
        ui.warn(_("abort: %s: %s\n")
                % (inst.desc or stringutil.forcebytestr(inst.filename), reason))
        if not inst.locker:
            ui.warn(_("(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        ui.warn(_("abort: could not lock %s: %s\n") %
                (inst.desc or stringutil.forcebytestr(inst.filename),
                 encoding.strtolocal(inst.strerror)))
    except error.OutOfBandError as inst:
        if inst.args:
            msg = _("abort: remote error:\n")
        else:
            msg = _("abort: remote error\n")
        ui.warn(msg)
        if inst.args:
            ui.warn(''.join(inst.args))
        if inst.hint:
            ui.warn('(%s)\n' % inst.hint)
    except error.RepoError as inst:
        ui.warn(_("abort: %s!\n") % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.warn(_("abort: %s") % inst.args[0])
        msg = inst.args[1]
        # The payload may be unicode, bytes, or something else entirely;
        # normalize to something printable.
        if isinstance(msg, type(u'')):
            msg = pycompat.sysbytes(msg)
        if not isinstance(msg, bytes):
            ui.warn(" %r\n" % (msg,))
        elif not msg:
            ui.warn(_(" empty string\n"))
        else:
            ui.warn("\n%r\n" % stringutil.ellipsis(msg))
    except error.CensoredNodeError as inst:
        ui.warn(_("abort: file censored %s!\n") % inst)
    except error.RevlogError as inst:
        ui.warn(_("abort: %s!\n") % inst)
    except error.InterventionRequired as inst:
        ui.warn("%s\n" % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
        # InterventionRequired is the only handler returning 1 instead of -1.
        return 1
    except error.WdirUnsupported:
        ui.warn(_("abort: working directory revision cannot be specified\n"))
    except error.Abort as inst:
        ui.warn(_("abort: %s\n") % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
    except ImportError as inst:
        ui.warn(_("abort: %s!\n") % stringutil.forcebytestr(inst))
        m = stringutil.forcebytestr(inst).split()[-1]
        if m in "mpatch bdiff".split():
            ui.warn(_("(did you forget to compile extensions?)\n"))
        elif m in "zlib".split():
            ui.warn(_("(is your Python install correct?)\n"))
    except IOError as inst:
        if util.safehasattr(inst, "code"):
            ui.warn(_("abort: %s\n") % stringutil.forcebytestr(inst))
        elif util.safehasattr(inst, "reason"):
            try: # usually it is in the form (errno, strerror)
                reason = inst.reason.args[1]
            except (AttributeError, IndexError):
                # it might be anything, for example a string
                reason = inst.reason
            # NOTE: `unicode` is a Python 2 builtin; this branch is py2-only.
            if isinstance(reason, unicode):
                # SSLError of Python 2.7.9 contains a unicode
                reason = encoding.unitolocal(reason)
            ui.warn(_("abort: error: %s\n") % reason)
        elif (util.safehasattr(inst, "args")
              and inst.args and inst.args[0] == errno.EPIPE):
            # Broken pipe (e.g. output piped to `head`) is not an error
            # worth reporting.
            pass
        elif getattr(inst, "strerror", None):
            if getattr(inst, "filename", None):
                ui.warn(_("abort: %s: %s\n") % (
                    encoding.strtolocal(inst.strerror),
                    stringutil.forcebytestr(inst.filename)))
            else:
                ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
        else:
            raise
    except OSError as inst:
        if getattr(inst, "filename", None) is not None:
            ui.warn(_("abort: %s: '%s'\n") % (
                encoding.strtolocal(inst.strerror),
                stringutil.forcebytestr(inst.filename)))
        else:
            ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
    except MemoryError:
        ui.warn(_("abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case catch this and and pass exit code to caller.
        return inst.code
    except socket.error as inst:
        ui.warn(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))

    return -1
268 268
def checknewlabel(repo, lbl, kind):
    """Abort if ``lbl`` is not an acceptable new label name.

    Do not use the "kind" parameter in ui output.
    It makes strings difficult to translate.
    """
    if lbl in ('tip', '.', 'null'):
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    for ch in (':', '\0', '\n', '\r'):
        if ch in lbl:
            raise error.Abort(
                _("%r cannot be used in a name") % pycompat.bytestr(ch))
    try:
        int(lbl)
    except ValueError:
        pass
    else:
        # Purely numeric names would be ambiguous with revision numbers.
        raise error.Abort(_("cannot use an integer as a name"))
    if lbl != lbl.strip():
        raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
285 285
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    # Newline characters are rejected outright.
    for forbidden in ('\r', '\n'):
        if forbidden in f:
            raise error.Abort(
                _("'\\n' and '\\r' disallowed in filenames: %r") % f)
290 290
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    # Nothing to do unless the config asks for a warning or an abort.
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %s" % (msg, procutil.shellquote(f))
    if abort:
        raise error.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
302 302
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    raw = ui.config('ui', 'portablefilenames')
    lowered = raw.lower()
    asbool = stringutil.parsebool(raw)
    # On Windows, non-portable names are always fatal.
    abort = pycompat.iswindows or lowered == 'abort'
    warn = asbool or lowered == 'warn'
    if asbool is None and not (warn or abort or lowered == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % raw)
    return abort, warn
315 315
class casecollisionauditor(object):
    """Detect files that differ only in case from already-tracked files.

    Instances are callable: each call with a filename either passes
    silently, warns, or aborts (depending on ``abort``) when the
    case-folded name collides with a tracked or previously-seen file.
    """
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        # Join all tracked names with NUL so a single encoding.lower()
        # call handles the whole dirstate at once.
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        """Check filename ``f``; warn or abort on a case-folding collision."""
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        # A collision only counts if f itself is not already tracked.
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)
339 339
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    # Only revisions at or below maxrev participate in the key.
    hidden = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if not hidden:
        return None
    digest = hashlib.sha1()
    for rev in hidden:
        digest.update('%d;' % rev)
    return digest.digest()
363 363
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # Only propagate walk errors on the root path itself; errors on
        # subdirectories are ignored by os.walk's onerror contract.
        if err.filename == path:
            raise err
    # os.path.samestat is used to detect symlink cycles; if the platform
    # lacks it, symlink following is disabled below.
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # Return True (and record the stat) iff dirname was not seen
            # before; False means a cycle/duplicate.
            dirstat = os.stat(dirname)
            match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        # Sort for deterministic yield order.
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # Recurse through the symlink manually since
                        # os.walk(topdown) won't follow it for us here.
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
407 407
def binnode(ctx):
    """Return binary node id for a given basectx"""
    # The working directory context has no node; substitute wdirid.
    node = ctx.node()
    return wdirid if node is None else node
414 414
def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    # The working directory context has rev() is None; map it to wdirrev.
    rev = ctx.rev()
    return wdirrev if rev is None else rev
422 422
def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    return formatrevnode(ctx.repo().ui, intrev(ctx), binnode(ctx))
428 428
def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    # full 40-char hash with --debug, abbreviated form otherwise
    hexfunc = hex if ui.debugflag else short
    return '%d:%s' % (rev, hexfunc(node))
436 436
def resolvehexnodeidprefix(repo, prefix):
    """Resolve a (possibly abbreviated) hex nodeid prefix to a full node.

    Returns None when the prefix matches no known node. If the matched
    node is filtered in this repo view, the changelog.rev() call raises
    the corresponding lookup error instead of returning the node.
    """
    # Uses unfiltered repo because it's faster when prefix is ambiguous.
    # This matches the shortesthexnodeidprefix() function below.
    node = repo.unfiltered().changelog._partialmatch(prefix)
    if node is None:
        return
    repo.changelog.rev(node)  # make sure node isn't filtered
    return node
445 445
def shortesthexnodeidprefix(repo, hexnode, minlength=1):
    """Find the shortest unambiguous prefix that matches hexnode."""
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. so we look for hash collision in
    # unfiltered space, which means some hashes may be slightly longer.
    return repo.unfiltered().changelog.shortest(hexnode, minlength)
449 452
def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.LookupError if the symbol is an
    ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
    except error.RepoLookupError:
        return False
    return True
461 464
def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        msg = ("symbol (%s of type %s) was not a string, did you mean "
               "repo[symbol]?" % (symbol, type(symbol)))
        raise error.ProgrammingError(msg)
    try:
        # well-known symbolic names are resolved directly by the repo
        if symbol in ('.', 'tip', 'null'):
            return repo[symbol]

        # try the symbol as a revision number; reject spellings whose
        # canonical rendering differs (e.g. '01'), and allow negative
        # numbers counted from the end
        try:
            r = int(symbol)
            if '%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        # try the symbol as a full 40-character hex nodeid
        if len(symbol) == 40:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        # finally, try the symbol as an abbreviated hex nodeid prefix
        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_("unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        # lookups of the working-directory pseudo-revision fall back to
        # the workingctx
        return repo[None]
    except (error.FilteredIndexError, error.FilteredLookupError,
            error.FilteredRepoLookupError):
        # translate low-level "filtered" errors into a user-facing message
        raise _filterederror(repo, symbol)
522 525
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    # 'visible' filters mean the changeset exists but is hidden; give a
    # more helpful message (and reason, if obsolete) in that case
    if repo.filtername.startswith('visible'):

        # Check if the changeset is obsolete
        unfilteredrepo = repo.unfiltered()
        ctx = revsymbol(unfilteredrepo, changeid)

        # If the changeset is obsolete, enrich the message with the reason
        # that made this changeset not visible
        # NOTE(review): 'obsutil' is not in the import block visible at the
        # top of this file — confirm it is imported elsewhere in the module
        if ctx.obsolete():
            msg = obsutil._getfilteredreason(repo, changeid, ctx)
        else:
            msg = _("hidden revision '%s'") % changeid

        hint = _('use --hidden to access hidden revisions')

        return error.FilteredRepoLookupError(msg, hint=hint)
    # any other filter (e.g. 'served'): generic message naming the subset
    msg = _("filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)
547 550
def revsingle(repo, revspec, default='.', localalias=None):
    """Resolve a single revspec to a changectx, using *default* when the
    spec is empty (but keeping a literal 0)."""
    if not revspec and revspec != 0:
        return repo[default]

    matched = revrange(repo, [revspec], localalias=localalias)
    if not matched:
        raise error.Abort(_('empty revision set'))
    return repo[matched.last()]
556 559
def _pairspec(revspec):
    """Report whether *revspec* parses to a top-level range expression."""
    tree = revsetlang.parse(revspec)
    if not tree:
        return tree
    return tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
560 563
def revpairnodes(repo, revs):
    """Deprecated shim: like revpair(), but returns nodes instead of
    contexts."""
    repo.ui.deprecwarn("revpairnodes is deprecated, please use revpair", "4.6")
    first, second = revpair(repo, revs)
    return first.node(), second.node()
565 568
def revpair(repo, revs):
    """Resolve user-supplied revision specs to a (first, second) ctx pair.

    With no specs, defaults to ('.', working directory).  A single
    non-range spec yields (ctx, working directory)."""
    if not revs:
        return repo['.'], repo[None]

    l = revrange(repo, revs)

    # pick the endpoints without materializing the whole set when the
    # smartset knows its ordering
    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        first = l.first()
        second = l.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    # the union collapsed to one rev even though multiple specs were
    # given: reject if any individual spec resolved to nothing
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]
595 598
def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    # bare integers are revision numbers; wrap them in rev() specs
    allspecs = [revsetlang.formatspec('rev(%d)', spec)
                if isinstance(spec, int) else spec
                for spec in specs]
    return repo.anyrevs(allspecs, user=True, localalias=localalias)
623 626
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        # merge: both parents always shown
        return parents
    if repo.ui.debugflag:
        # debug output shows the null parent explicitly
        return [parents[0], repo['null']]
    # linear history: hide the parent when it is just the preceding rev
    return [] if parents[0].rev() >= intrev(ctx) - 1 else parents
639 642
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it already has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # explicit pattern kind ('glob:', 're:', ...): pass through
            expanded.append(kindpat)
            continue
        try:
            matches = glob.glob(pat)
        except re.error:
            matches = [pat]
        # an empty glob result keeps the original pattern untouched
        expanded.extend(matches if matches else [kindpat])
    return expanded
658 661
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    # note: 'm' is assigned below; this closure resolves it lazily at
    # call time, after the matcher exists
    def bad(f, msg):
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    # an all-matching matcher is reported with an empty pattern list
    if m.always():
        pats = []
    return m, pats
683 686
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    m, _unused = matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)
    return m
688 691
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    root = repo.root
    cwd = repo.getcwd()
    return matchmod.always(root, cwd)
692 695
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    root = repo.root
    cwd = repo.getcwd()
    return matchmod.exact(root, cwd, files, badfn=badfn)
696 699
def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.
    """
    if not matchmod.patkind(pat):
        # plain path: normalize relative to the repo root
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    # pattern: it must match exactly one file in the given revision
    ctx = repo[rev]
    m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
    files = [f for f in ctx if m(f)]
    if len(files) != 1:
        raise error.ParseError(msg)
    return files[0]
710 713
def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified
    '''
    origbackuppath = ui.config('ui', 'origbackuppath')
    if not origbackuppath:
        return filepath + ".orig"

    # Convert filepath from an absolute path into a path inside the repo.
    filepathfromroot = util.normpath(os.path.relpath(filepath,
                                                     start=repo.root))

    origvfs = vfs.vfs(repo.wjoin(origbackuppath))
    origbackupdir = origvfs.dirname(filepathfromroot)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        for f in reversed(list(util.finddirs(filepathfromroot))):
            if origvfs.isfileorlink(f):
                ui.note(_('removing conflicting file: %s\n')
                        % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    # a directory (but not a symlink) at the backup path itself also
    # conflicts and must be removed before the backup can be written
    if origvfs.isdir(filepathfromroot) and not origvfs.islink(filepathfromroot):
        ui.note(_('removing conflicting directory: %s\n')
                % origvfs.join(filepathfromroot))
        origvfs.rmtree(filepathfromroot, forcibly=True)

    return origvfs.join(filepathfromroot)
746 749
747 750 class _containsnode(object):
748 751 """proxy __contains__(node) to container.__contains__ which accepts revs"""
749 752
750 753 def __init__(self, repo, revcontainer):
751 754 self._torev = repo.changelog.rev
752 755 self._revcontains = revcontainer.__contains__
753 756
754 757 def __contains__(self, node):
755 758 return self._revcontains(self._torev(node))
756 759
def cleanupnodes(repo, replacements, operation, moves=None, metadata=None):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or a iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is dictionary containing metadata to be stored in obsmarker if
    obsolescence is enabled.
    """
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, 'items'):
        replacements = {n: () for n in replacements}

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()
    for oldnode, newnodes in replacements.items():
        # explicit entries in 'moves' take precedence over computed ones
        if oldnode in moves:
            continue
        if len(newnodes) > 1:
            # usually a split, take the one with biggest rev number
            newnode = next(unfi.set('max(%ln)', newnodes)).node()
        elif len(newnodes) == 0:
            # move bookmark backwards
            roots = list(unfi.set('max((::%n) - %ln)', oldnode,
                                  list(replacements)))
            if roots:
                newnode = roots[0].node()
            else:
                newnode = nullid
        else:
            newnode = newnodes[0]
        moves[oldnode] = newnode

    with repo.transaction('cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        allnewnodes = [n for ns in replacements.values() for n in ns]
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks # avoid import cycle
            repo.ui.debug('moving bookmarks %r from %s to %s\n' %
                          (util.rapply(pycompat.maybebytestr, oldbmarks),
                           hex(oldnode), hex(newnode)))
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
                                   allnewnodes, newnode, oldnode)
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obssolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the node in topology order, that might be useful for
            # some obsstore logic.
            # NOTE: the filtering and sorting might belong to createmarkers.
            isobs = unfi.obsstore.successors.__contains__
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0])
            rels = [(unfi[n], tuple(unfi[m] for m in s))
                    for n, s in sorted(replacements.items(), key=sortfunc)
                    if s or not isobs(n)]
            if rels:
                obsolete.createmarkers(repo, rels, operation=operation,
                                       metadata=metadata)
        else:
            from . import repair # avoid import cycle
            tostrip = list(replacements)
            if tostrip:
                repair.delayedstrip(repo.ui, repo, tostrip, operation)
850 853
def addremove(repo, matcher, prefix, opts=None):
    """Add new files and remove missing files matched by 'matcher'.

    Recurses into subrepos when requested, prints what it does (subject
    to verbosity and exact-match rules), records renames detected by
    similarity, and returns 1 if anything was rejected, else 0/1 from
    the subrepo results."""
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get('dry_run')
    try:
        # user-supplied similarity is a percentage
        similarity = float(opts.get('similarity') or 0)
    except ValueError:
        raise error.Abort(_('similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.Abort(_('similarity must be between 0 and 100'))
    similarity /= 100.0

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        # recurse when explicitly requested, when the subrepo itself is
        # named, or when the matcher has files inside the subrepo
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            try:
                if sub.addremove(submatch, prefix, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        # only warn about files the user named explicitly
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                   badmatch)

    # report additions/removals (exact matches only reported in verbose)
    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret
910 913
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    # note: the badfn lambda refers to 'rejected', which is bound on the
    # next line — Python closures resolve the name at call time
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    # unlike addremove(), there is no dry-run mode here
    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0
939 942
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
                                unknown=True, ignored=False, full=False)
    # classify each walked path by its dirstate status character and
    # whether a stat result ('st') exists on disk
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            # untracked and a legal path inside the repo
            unknown.append(abs)
        elif dstate != 'r' and not st:
            # tracked but missing from disk
            deleted.append(abs)
        elif dstate == 'r' and st:
            # marked removed but present on disk
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
968 971
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity <= 0:
        # similarity of zero disables rename detection entirely
        return renames
    for old, new, score in similar.findrenames(repo, added, removed,
                                               similarity):
        if (repo.ui.verbose or not matcher.exact(old)
            or not matcher.exact(new)):
            repo.ui.status(_('recording removal of %s as rename to %s '
                             '(%d%% similar)\n') %
                           (matcher.rel(old), matcher.rel(new),
                            score * 100))
        renames[new] = old
    return renames
983 986
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    workingctx = repo[None]
    with repo.wlock():
        workingctx.forget(deleted)
        workingctx.add(unknown)
        # renames maps destination -> source
        for dst, src in renames.iteritems():
            workingctx.copy(src, dst)
993 996
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    # follow an existing copy chain back to the original source
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # the source is only added, not committed: there is no
            # revision to record copy data against
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
1012 1015
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for requirement in requirements:
        if requirement in supported:
            continue
        # entries must start with an alphanumeric character
        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        missings.append(requirement)
    if missings:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(sorted(missings)),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements
1031 1034
def writerequires(opener, requirements):
    """Write the requirements to .hg/requires, one per line, sorted."""
    with opener('requires', 'w') as fp:
        for requirement in sorted(requirements):
            fp.write("%s\n" % requirement)
1036 1039
class filecachesubentry(object):
    """Stat-based change tracker for a single path.

    Records the stat of 'path' and reports via changed() whether the
    file has been replaced since."""
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        # re-stat so later changed() calls compare against current state
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        # returns None (implicitly) for a missing file; any other OS
        # error propagates
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
1091 1094
class filecacheentry(object):
    """Aggregate of filecachesubentry objects, one per tracked path."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(path, stat) for path in paths]

    def changed(self):
        '''true if any entry has changed'''
        # short-circuits on the first changed entry, like the subentries
        return any(entry.changed() for entry in self._entries)

    def refresh(self):
        for entry in self._entries:
            entry.refresh()
1108 1111
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behavior as
    propertycache).

    '''
    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        # decorator protocol: capture the wrapped function and its name
        self.func = func
        self.name = func.__name__.encode('ascii')
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                # underlying file replaced: recompute the cached object
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
1187 1190
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config("extdata", source)
    if not spec:
        raise error.Abort(_("unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith("shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                                    close_fds=procutil.closefds,
                                    stdout=subprocess.PIPE, cwd=repo.root)
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            # record format: '<revspec>[ <value>]'
            if " " in l:
                k, v = l.strip().split(" ", 1)
            else:
                k, v = l.strip(), ""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass # we ignore data for nodes that don't exist locally
    finally:
        # always reap the child and close the stream, even on error
        if proc:
            proc.communicate()
        if src:
            src.close()
    if proc and proc.returncode != 0:
        raise error.Abort(_("extdata command '%s' failed: %s")
                          % (cmd, procutil.explainexit(proc.returncode)))

    return data
1242 1245
def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    """Run *cmd* via ui.system() with *envvar* set to an inherited lock
    token, so the subprocess can take over the lock.

    Raises LockInheritanceContractViolation when *lock* is None (i.e.
    not currently held).  Note: a caller-supplied *environ* dict is
    mutated in place."""
    if lock is None:
        raise error.LockInheritanceContractViolation(
            'lock can only be inherited while held')
    if environ is None:
        environ = {}
    with lock.inherit() as locker:
        environ[envvar] = locker
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1252 1255
def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    wlock = repo.currentwlock()
    return _locksub(repo, wlock, 'HG_WLOCK_LOCKER', cmd, *args, **kwargs)
1261 1264
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    generaldelta = ui.configbool('format', 'generaldelta')
    if generaldelta:
        return generaldelta
    return ui.configbool('format', 'usegeneraldelta')
1268 1271
def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    enabled = ui.configbool('format', 'generaldelta')
    return enabled
1274 1277
class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""
    # reserved key under which read() exposes a raw first line
    firstlinekey = '__firstline'

    def __init__(self, vfs, path, keys=None):
        # NOTE(review): 'keys' is accepted but never used; presumably kept
        # for interface compatibility — confirm before removing.
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of file should
        be treated as a key-value pair or returned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                e = _("empty simplekeyvalue file")
                raise error.CorruptedState(e)
            # we don't want to include '\n' in the __firstline
            d[self.firstlinekey] = lines[0][:-1]
            del lines[0]

        try:
            # the 'if line.strip()' part prevents us from failing on empty
            # lines which only contain '\n' therefore are not skipped
            # by 'if line'
            updatedict = dict(line[:-1].split('=', 1) for line in lines
                              if line.strip())
            if self.firstlinekey in updatedict:
                e = _("%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            d.update(updatedict)
        except ValueError as e:
            # a line without '=' makes dict() see a 1-tuple and raise
            raise error.CorruptedState(str(e))
        return d

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file
        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to file before
        everything else, as it is, not in a key=value form"""
        lines = []
        if firstline is not None:
            lines.append('%s\n' % firstline)

        # validate each key/value before serializing; these are programming
        # errors, not user errors, hence ProgrammingError
        for k, v in data.items():
            if k == self.firstlinekey:
                e = "key name '%s' is reserved" % self.firstlinekey
                raise error.ProgrammingError(e)
            if not k[0:1].isalpha():
                e = "keys must start with a letter in a key-value file"
                raise error.ProgrammingError(e)
            if not k.isalnum():
                e = "invalid key name in a simple key-value file"
                raise error.ProgrammingError(e)
            if '\n' in v:
                e = "invalid value in a simple key-value file"
                raise error.ProgrammingError(e)
            lines.append("%s=%s\n" % (k, v))
        # atomictemp ensures readers never observe a partially written file
        with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
            fp.write(''.join(lines))
1343 1346
# transaction-name prefixes for which registersummarycallback() reports the
# number of changesets obsoleted by the transaction
_reportobsoletedsource = [
    'debugobsolete',
    'pull',
    'push',
    'serve',
    'unbundle',
]

# transaction-name prefixes for which registersummarycallback() reports the
# range of newly added changesets
_reportnewcssource = [
    'pull',
    'unbundle',
]

# a list of (repo, ctx, files) functions called by various commands to allow
# extensions to ensure the corresponding files are available locally, before the
# command uses them.
fileprefetchhooks = util.hooks()

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True
1364 1367
def registersummarycallback(repo, otr, txnname=''):
    """register a callback to issue a summary after the transaction is closed

    Which summaries are registered depends on 'txnname' (matched by prefix
    against the _report*source lists above) and on repo configuration.
    """
    def txmatch(sources):
        # does the transaction name start with any of the given prefixes?
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used leading to troubles. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())
        def wrapped(tr):
            repo = reporef()
            if filtername:
                repo = repo.filtered(filtername)
            func(repo, tr)
        # category names are numbered so callbacks run in registration order
        newcat = '%02i-txnreport' % len(categories)
        otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    if txmatch(_reportobsoletedsource):
        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            if obsoleted:
                repo.ui.status(_('obsoleted %i changesets\n')
                               % len(obsoleted))

    if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
        repo.ui.configbool('experimental', 'evolution.report-instabilities')):
        # (display name, revset name) pairs for each instability kind
        instabilitytypes = [
            ('orphan', 'orphan'),
            ('phase-divergent', 'phasedivergent'),
            ('content-divergent', 'contentdivergent'),
        ]

        def getinstabilitycounts(repo):
            # count unstable revisions, ignoring filtered (hidden) ones
            filtered = repo.changelog.filteredrevs
            counts = {}
            for instability, revset in instabilitytypes:
                counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
                                          filtered)
            return counts

        # snapshot the counts now so the callback can report only the delta
        oldinstabilitycounts = getinstabilitycounts(repo)
        @reportsummary
        def reportnewinstabilities(repo, tr):
            newinstabilitycounts = getinstabilitycounts(repo)
            for instability, revset in instabilitytypes:
                delta = (newinstabilitycounts[instability] -
                         oldinstabilitycounts[instability])
                if delta > 0:
                    repo.ui.warn(_('%i new %s changesets\n') %
                                 (delta, instability))

    if txmatch(_reportnewcssource):
        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            newrevs = tr.changes.get('revs', xrange(0, 0))
            if not newrevs:
                return

            # Compute the bounds of new revisions' range, excluding obsoletes.
            unfi = repo.unfiltered()
            revs = unfi.revs('%ld and not obsolete()', newrevs)
            if not revs:
                # Got only obsoletes.
                return
            minrev, maxrev = repo[revs.min()], repo[revs.max()]

            if minrev == maxrev:
                revrange = minrev
            else:
                revrange = '%s:%s' % (minrev, maxrev)
            repo.ui.status(_('new changesets %s\n') % revrange)
1448 1451
def nodesummaries(repo, nodes, maxnumnodes=4):
    """Return a one-line summary of short hashes for 'nodes'.

    Past 'maxnumnodes' entries the list is elided to "... and N others",
    unless the ui is verbose, in which case everything is shown.
    """
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return ' '.join(short(h) for h in nodes)
    shown = ' '.join(short(h) for h in nodes[:maxnumnodes])
    hidden = len(nodes) - maxnumnodes
    return _("%s and %d others") % (shown, hidden)
1454 1457
def enforcesinglehead(repo, tr, desc):
    """check that no named branch has multiple heads

    Aborts the transaction with a descriptive hint if any visible branch has
    more than one head. 'desc' is the transaction description; strip/repair
    transactions are exempt since they legitimately rearrange heads.
    """
    if desc in ('strip', 'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered('visible')
    # possible improvement: we could restrict the check to affected branch
    for name, heads in visible.branchmap().iteritems():
        if len(heads) > 1:
            msg = _('rejecting multiple heads on branch "%s"')
            msg %= name
            hint = _('%d heads: %s')
            hint %= (len(heads), nodesummaries(repo, heads))
            raise error.Abort(msg, hint=hint)
1469 1472
def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.
    """
    # Default implementation is the identity; extensions monkeypatch this.
    return sink
1475 1478
def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    # directaccess must be explicitly enabled; otherwise leave repo untouched
    if not repo.filtername or not repo.ui.configbool('experimental',
                                                     'directaccess'):
        return repo

    # only these two filter levels support pinning extra revisions
    if repo.filtername not in ('visible', 'visible-hidden'):
        return repo

    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError: # will be reported by scmutil.revrange()
            continue

        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)

    if not revs:
        return repo

    if hiddentype == 'warn':
        unfi = repo.unfiltered()
        revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
        repo.ui.warn(_("warning: accessing hidden changesets for write "
                       "operation: %s\n") % revstr)

    # we have to use new filtername to separate branch/tags cache until we can
    # disable these cache when revisions are dynamically pinned.
    return repo.filtered('visible-hidden', revs)
1518 1521
def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and returns a set of revision numbers of hidden
    changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    pmatch = unficl._partialmatch
    allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
    for s in symbols:
        # first, try interpreting the symbol as a plain revision number
        try:
            n = int(s)
            # NOTE(review): 'n <= tiprev' admits n == len(unficl), which is
            # one past the last valid rev — presumably harmless because such
            # a rev is never hidden, but confirm the boundary is intended.
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    # hidden iff present unfiltered but absent filtered
                    if n not in cl:
                        revs.add(n)
                continue
        except ValueError:
            pass

        # otherwise treat the symbol as a (possibly partial) hash prefix
        try:
            s = pmatch(s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs
@@ -1,676 +1,673 b''
1 1 # templatefuncs.py - common template functions
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import re
11 11
12 12 from .i18n import _
13 13 from . import (
14 14 color,
15 15 encoding,
16 16 error,
17 17 minirst,
18 18 obsutil,
19 19 pycompat,
20 20 registrar,
21 21 revset as revsetmod,
22 22 revsetlang,
23 23 scmutil,
24 24 templatefilters,
25 25 templatekw,
26 26 templateutil,
27 27 util,
28 28 )
29 29 from .utils import (
30 30 dateutil,
31 31 stringutil,
32 32 )
33 33
# local aliases for the templateutil evaluation helpers used throughout
# this module
evalrawexp = templateutil.evalrawexp
evalfuncarg = templateutil.evalfuncarg
evalboolean = templateutil.evalboolean
evaldate = templateutil.evaldate
evalinteger = templateutil.evalinteger
evalstring = templateutil.evalstring
evalstringliteral = templateutil.evalstringliteral

# dict of template built-in functions
funcs = {}
templatefunc = registrar.templatefunc(funcs)
45 45
@templatefunc('date(date[, fmt])')
def date(context, mapping, args):
    """Format a date. See :hg:`help dates` for formatting
    strings. The default is a Unix date format, including the timezone:
    "Mon Sep 04 15:13:13 2006 0700"."""
    if not (1 <= len(args) <= 2):
        # i18n: "date" is a keyword
        raise error.ParseError(_("date expects one or two arguments"))

    date = evaldate(context, mapping, args[0],
                    # i18n: "date" is a keyword
                    _("date expects a date information"))
    fmt = None
    if len(args) == 2:
        fmt = evalstring(context, mapping, args[1])
    # fmt stays None when no format argument is given; dateutil then uses
    # its default Unix-date format
    if fmt is None:
        return dateutil.datestr(date)
    else:
        return dateutil.datestr(date, fmt)
65 65
@templatefunc('dict([[key=]value...])', argspec='*args **kwargs')
def dict_(context, mapping, args):
    """Construct a dict from key-value pairs. A key may be omitted if
    a value expression can provide an unambiguous name."""
    data = util.sortdict()

    # positional arguments: infer a key from the expression's symbolic name
    for v in args['args']:
        k = templateutil.findsymbolicname(v)
        if not k:
            raise error.ParseError(_('dict key cannot be inferred'))
        if k in data or k in args['kwargs']:
            raise error.ParseError(_("duplicated dict key '%s' inferred") % k)
        data[k] = evalfuncarg(context, mapping, v)

    # explicit key=value arguments
    data.update((k, evalfuncarg(context, mapping, v))
                for k, v in args['kwargs'].iteritems())
    return templateutil.hybriddict(data)
83 83
@templatefunc('diff([includepattern [, excludepattern]])')
def diff(context, mapping, args):
    """Show a diff, optionally
    specifying files to include or exclude."""
    if len(args) > 2:
        # i18n: "diff" is a keyword
        raise error.ParseError(_("diff expects zero, one, or two arguments"))

    def getpatterns(i):
        # return the i-th argument as a one-element pattern list, or []
        # if the argument is absent or empty
        if i < len(args):
            s = evalstring(context, mapping, args[i]).strip()
            if s:
                return [s]
        return []

    ctx = context.resource(mapping, 'ctx')
    # arg 0 is the include pattern, arg 1 the exclude pattern
    chunks = ctx.diff(match=ctx.match([], getpatterns(0), getpatterns(1)))

    return ''.join(chunks)
103 103
@templatefunc('extdata(source)', argspec='source')
def extdata(context, mapping, args):
    """Show a text read from the specified extdata source. (EXPERIMENTAL)"""
    if 'source' not in args:
        # i18n: "extdata" is a keyword
        raise error.ParseError(_('extdata expects one argument'))

    source = evalstring(context, mapping, args['source'])
    # cache the parsed source so it is read at most once per template run
    cache = context.resource(mapping, 'cache').setdefault('extdata', {})
    ctx = context.resource(mapping, 'ctx')
    if source in cache:
        data = cache[source]
    else:
        data = cache[source] = scmutil.extdatasource(ctx.repo(), source)
    return data.get(ctx.rev(), '')
119 119
@templatefunc('files(pattern)')
def files(context, mapping, args):
    """All files of the current changeset matching the pattern. See
    :hg:`help patterns`."""
    # idiom/consistency: use 'len(args) != 1' like the other single-argument
    # functions in this module (formatnode, mailmap, relpath, ...)
    if len(args) != 1:
        # i18n: "files" is a keyword
        raise error.ParseError(_("files expects one argument"))

    raw = evalstring(context, mapping, args[0])
    ctx = context.resource(mapping, 'ctx')
    m = ctx.match([raw])
    files = list(ctx.matches(m))
    return templateutil.compatlist(context, mapping, "file", files)
133 133
@templatefunc('fill(text[, width[, initialident[, hangindent]]])')
def fill(context, mapping, args):
    """Fill many
    paragraphs with optional indentation. See the "fill" filter."""
    if not (1 <= len(args) <= 4):
        # i18n: "fill" is a keyword
        raise error.ParseError(_("fill expects one to four arguments"))

    text = evalstring(context, mapping, args[0])
    # defaults when the optional arguments are omitted
    width = 76
    initindent = ''
    hangindent = ''
    if 2 <= len(args) <= 4:
        width = evalinteger(context, mapping, args[1],
                            # i18n: "fill" is a keyword
                            _("fill expects an integer width"))
        # args[2]/args[3] may be absent; IndexError keeps the defaults
        try:
            initindent = evalstring(context, mapping, args[2])
            hangindent = evalstring(context, mapping, args[3])
        except IndexError:
            pass

    return templatefilters.fill(text, width, initindent, hangindent)
157 157
@templatefunc('formatnode(node)')
def formatnode(context, mapping, args):
    """Obtain the preferred form of a changeset hash. (DEPRECATED)"""
    if len(args) != 1:
        # i18n: "formatnode" is a keyword
        raise error.ParseError(_("formatnode expects one argument"))

    ui = context.resource(mapping, 'ui')
    node = evalstring(context, mapping, args[0])
    # full hash in debug mode, abbreviated form otherwise
    if ui.debugflag:
        return node
    return templatefilters.short(node)
170 170
@templatefunc('mailmap(author)')
def mailmap(context, mapping, args):
    """Return the author, updated according to the value
    set in the .mailmap file"""
    if len(args) != 1:
        raise error.ParseError(_("mailmap expects one argument"))

    author = evalstring(context, mapping, args[0])

    cache = context.resource(mapping, 'cache')
    repo = context.resource(mapping, 'repo')

    # parse .mailmap at most once per template run
    if 'mailmap' not in cache:
        data = repo.wvfs.tryread('.mailmap')
        cache['mailmap'] = stringutil.parsemailmap(data)

    return stringutil.mapname(cache['mailmap'], author)
188 188
@templatefunc('pad(text, width[, fillchar=\' \'[, left=False]])',
              argspec='text width fillchar left')
def pad(context, mapping, args):
    """Pad text with a
    fill character."""
    if 'text' not in args or 'width' not in args:
        # i18n: "pad" is a keyword
        raise error.ParseError(_("pad() expects two to four arguments"))

    width = evalinteger(context, mapping, args['width'],
                        # i18n: "pad" is a keyword
                        _("pad() expects an integer width"))

    text = evalstring(context, mapping, args['text'])

    left = False
    fillchar = ' '
    if 'fillchar' in args:
        fillchar = evalstring(context, mapping, args['fillchar'])
        # color effects don't occupy columns, so measure without them
        if len(color.stripeffects(fillchar)) != 1:
            # i18n: "pad" is a keyword
            raise error.ParseError(_("pad() expects a single fill character"))
    if 'left' in args:
        left = evalboolean(context, mapping, args['left'])

    # colwidth accounts for wide (east-asian) characters
    fillwidth = width - encoding.colwidth(color.stripeffects(text))
    if fillwidth <= 0:
        return text
    if left:
        return fillchar * fillwidth + text
    else:
        return text + fillchar * fillwidth
221 221
@templatefunc('indent(text, indentchars[, firstline])')
def indent(context, mapping, args):
    """Indents all non-empty lines
    with the characters given in the indentchars string. An optional
    third parameter will override the indent for the first line only
    if present."""
    if not (2 <= len(args) <= 3):
        # i18n: "indent" is a keyword
        raise error.ParseError(_("indent() expects two or three arguments"))

    text = evalstring(context, mapping, args[0])
    indent = evalstring(context, mapping, args[1])

    if len(args) == 3:
        firstline = evalstring(context, mapping, args[2])
    else:
        firstline = indent

    # the indent function doesn't indent the first line, so we do it here
    return templatefilters.indent(firstline + text, indent)
242 242
@templatefunc('get(dict, key)')
def get(context, mapping, args):
    """Get an attribute/key from an object. Some keywords
    are complex types. This function allows you to obtain the value of an
    attribute on these types."""
    if len(args) != 2:
        # i18n: "get" is a keyword
        raise error.ParseError(_("get() expects two arguments"))

    dictarg = evalfuncarg(context, mapping, args[0])
    # duck-type check: anything with a .get() qualifies as a dict here
    if not util.safehasattr(dictarg, 'get'):
        # i18n: "get" is a keyword
        raise error.ParseError(_("get() expects a dict as first argument"))

    key = evalfuncarg(context, mapping, args[1])
    return templateutil.getdictitem(dictarg, key)
259 259
@templatefunc('if(expr, then[, else])')
def if_(context, mapping, args):
    """Conditionally execute based on the result of
    an expression."""
    if not (2 <= len(args) <= 3):
        # i18n: "if" is a keyword
        raise error.ParseError(_("if expects two or three arguments"))

    test = evalboolean(context, mapping, args[0])
    if test:
        return evalrawexp(context, mapping, args[1])
    elif len(args) == 3:
        return evalrawexp(context, mapping, args[2])
    # implicit None (empty output) when the test fails and no else is given
273 273
@templatefunc('ifcontains(needle, haystack, then[, else])')
def ifcontains(context, mapping, args):
    """Conditionally execute based
    on whether the item "needle" is in "haystack"."""
    if not (3 <= len(args) <= 4):
        # i18n: "ifcontains" is a keyword
        raise error.ParseError(_("ifcontains expects three or four arguments"))

    haystack = evalfuncarg(context, mapping, args[1])
    # coerce the needle to the haystack's element type when it declares one
    keytype = getattr(haystack, 'keytype', None)
    try:
        needle = evalrawexp(context, mapping, args[0])
        needle = templateutil.unwrapastype(context, mapping, needle,
                                           keytype or bytes)
        found = (needle in haystack)
    except error.ParseError:
        # an unconvertible needle simply isn't contained
        found = False

    if found:
        return evalrawexp(context, mapping, args[2])
    elif len(args) == 4:
        return evalrawexp(context, mapping, args[3])
296 296
@templatefunc('ifeq(expr1, expr2, then[, else])')
def ifeq(context, mapping, args):
    """Conditionally execute based on
    whether 2 items are equivalent."""
    if not (3 <= len(args) <= 4):
        # i18n: "ifeq" is a keyword
        raise error.ParseError(_("ifeq expects three or four arguments"))

    # both sides are compared as strings
    test = evalstring(context, mapping, args[0])
    match = evalstring(context, mapping, args[1])
    if test == match:
        return evalrawexp(context, mapping, args[2])
    elif len(args) == 4:
        return evalrawexp(context, mapping, args[3])
311 311
@templatefunc('join(list, sep)')
def join(context, mapping, args):
    """Join items in a list with a delimiter."""
    if not (1 <= len(args) <= 2):
        # i18n: "join" is a keyword
        raise error.ParseError(_("join expects one or two arguments"))

    joinset = evalrawexp(context, mapping, args[0])
    # default separator is a single space
    joiner = " "
    if len(args) > 1:
        joiner = evalstring(context, mapping, args[1])
    if isinstance(joinset, templateutil.wrapped):
        return joinset.join(context, mapping, joiner)
    # TODO: perhaps a generator should be stringify()-ed here, but we can't
    # because hgweb abuses it as a keyword that returns a list of dicts.
    joinset = templateutil.unwrapvalue(context, mapping, joinset)
    return templateutil.joinitems(pycompat.maybebytestr(joinset), joiner)
329 329
@templatefunc('label(label, expr)')
def label(context, mapping, args):
    """Apply a label to generated content. Content with
    a label applied can result in additional post-processing, such as
    automatic colorization."""
    if len(args) != 2:
        # i18n: "label" is a keyword
        raise error.ParseError(_("label expects two arguments"))

    ui = context.resource(mapping, 'ui')
    thing = evalstring(context, mapping, args[1])
    # preserve unknown symbol as literal so effects like 'red', 'bold',
    # etc. don't need to be quoted
    label = evalstringliteral(context, mapping, args[0])

    return ui.label(thing, label)
346 346
@templatefunc('latesttag([pattern])')
def latesttag(context, mapping, args):
    """The global tags matching the given pattern on the
    most recent globally tagged ancestor of this changeset.
    If no such tags exist, the "{tag}" template resolves to
    the string "null"."""
    if len(args) > 1:
        # i18n: "latesttag" is a keyword
        raise error.ParseError(_("latesttag expects at most one argument"))

    # a None pattern means "all global tags"
    pattern = None
    if len(args) == 1:
        pattern = evalstring(context, mapping, args[0])
    return templatekw.showlatesttags(context, mapping, pattern)
361 361
@templatefunc('localdate(date[, tz])')
def localdate(context, mapping, args):
    """Converts a date to the specified timezone.
    The default is local date."""
    if not (1 <= len(args) <= 2):
        # i18n: "localdate" is a keyword
        raise error.ParseError(_("localdate expects one or two arguments"))

    date = evaldate(context, mapping, args[0],
                    # i18n: "localdate" is a keyword
                    _("localdate expects a date information"))
    if len(args) >= 2:
        tzoffset = None
        tz = evalfuncarg(context, mapping, args[1])
        # the tz argument may be a timezone name/offset string...
        if isinstance(tz, bytes):
            tzoffset, remainder = dateutil.parsetimezone(tz)
            if remainder:
                tzoffset = None
        # ...or a raw offset in seconds
        if tzoffset is None:
            try:
                tzoffset = int(tz)
            except (TypeError, ValueError):
                # i18n: "localdate" is a keyword
                raise error.ParseError(_("localdate expects a timezone"))
    else:
        # no tz argument: use the local timezone offset
        tzoffset = dateutil.makedate()[1]
    return (date[0], tzoffset)
389 389
@templatefunc('max(iterable)')
def max_(context, mapping, args, **kwargs):
    """Return the max of an iterable"""
    if len(args) != 1:
        # i18n: "max" is a keyword
        raise error.ParseError(_("max expects one argument"))

    iterable = evalfuncarg(context, mapping, args[0])
    try:
        x = max(pycompat.maybebytestr(iterable))
    except (TypeError, ValueError):
        # TypeError: not iterable; ValueError: empty iterable
        # i18n: "max" is a keyword
        raise error.ParseError(_("max first argument should be an iterable"))
    return templateutil.wraphybridvalue(iterable, x, x)
404 404
@templatefunc('min(iterable)')
def min_(context, mapping, args, **kwargs):
    """Return the min of an iterable"""
    if len(args) != 1:
        # i18n: "min" is a keyword
        raise error.ParseError(_("min expects one argument"))

    iterable = evalfuncarg(context, mapping, args[0])
    try:
        x = min(pycompat.maybebytestr(iterable))
    except (TypeError, ValueError):
        # TypeError: not iterable; ValueError: empty iterable
        # i18n: "min" is a keyword
        raise error.ParseError(_("min first argument should be an iterable"))
    return templateutil.wraphybridvalue(iterable, x, x)
419 419
@templatefunc('mod(a, b)')
def mod(context, mapping, args):
    """Calculate a mod b such that a / b + a mod b == a"""
    # idiom/consistency: use 'len(args) != 2' like the other fixed-arity
    # functions in this module instead of 'not len(args) == 2'
    if len(args) != 2:
        # i18n: "mod" is a keyword
        raise error.ParseError(_("mod expects two arguments"))

    # evaluation of the operands is deferred to runarithmetic
    func = lambda a, b: a % b
    return templateutil.runarithmetic(context, mapping,
                                      (func, args[0], args[1]))
430 430
@templatefunc('obsfateoperations(markers)')
def obsfateoperations(context, mapping, args):
    """Compute obsfate related information based on markers (EXPERIMENTAL)"""
    if len(args) != 1:
        # i18n: "obsfateoperations" is a keyword
        raise error.ParseError(_("obsfateoperations expects one argument"))

    markers = evalfuncarg(context, mapping, args[0])

    try:
        data = obsutil.markersoperations(markers)
        return templateutil.hybridlist(data, name='operation')
    except (TypeError, KeyError):
        # the argument wasn't an iterable of obsolescence markers
        # i18n: "obsfateoperations" is a keyword
        errmsg = _("obsfateoperations first argument should be an iterable")
        raise error.ParseError(errmsg)
447 447
@templatefunc('obsfatedate(markers)')
def obsfatedate(context, mapping, args):
    """Compute obsfate related information based on markers (EXPERIMENTAL)"""
    if len(args) != 1:
        # i18n: "obsfatedate" is a keyword
        raise error.ParseError(_("obsfatedate expects one argument"))

    markers = evalfuncarg(context, mapping, args[0])

    try:
        data = obsutil.markersdates(markers)
        # each date is a (timestamp, tzoffset) pair, hence the '%d %d' format
        return templateutil.hybridlist(data, name='date', fmt='%d %d')
    except (TypeError, KeyError):
        # the argument wasn't an iterable of obsolescence markers
        # i18n: "obsfatedate" is a keyword
        errmsg = _("obsfatedate first argument should be an iterable")
        raise error.ParseError(errmsg)
464 464
@templatefunc('obsfateusers(markers)')
def obsfateusers(context, mapping, args):
    """Compute obsfate related information based on markers (EXPERIMENTAL)"""
    if len(args) != 1:
        # i18n: "obsfateusers" is a keyword
        raise error.ParseError(_("obsfateusers expects one argument"))

    markers = evalfuncarg(context, mapping, args[0])

    try:
        data = obsutil.markersusers(markers)
        return templateutil.hybridlist(data, name='user')
    except (TypeError, KeyError, ValueError):
        # fix: error message said "obsmakers"; the correct term is obsmarkers
        # i18n: "obsfateusers" is a keyword
        msg = _("obsfateusers first argument should be an iterable of "
                "obsmarkers")
        raise error.ParseError(msg)
482 482
@templatefunc('obsfateverb(successors, markers)')
def obsfateverb(context, mapping, args):
    """Compute obsfate related information based on successors (EXPERIMENTAL)"""
    if len(args) != 2:
        # i18n: "obsfateverb" is a keyword
        raise error.ParseError(_("obsfateverb expects two arguments"))

    successors = evalfuncarg(context, mapping, args[0])
    markers = evalfuncarg(context, mapping, args[1])

    try:
        return obsutil.obsfateverb(successors, markers)
    except TypeError:
        # len() failed on the successors argument
        # i18n: "obsfateverb" is a keyword
        errmsg = _("obsfateverb first argument should be countable")
        raise error.ParseError(errmsg)
499 499
@templatefunc('relpath(path)')
def relpath(context, mapping, args):
    """Convert a repository-absolute path into a filesystem path relative to
    the current working directory."""
    if len(args) != 1:
        # i18n: "relpath" is a keyword
        raise error.ParseError(_("relpath expects one argument"))

    # the repo is reached through the current changectx resource
    repo = context.resource(mapping, 'ctx').repo()
    path = evalstring(context, mapping, args[0])
    return repo.pathto(path)
511 511
@templatefunc('revset(query[, formatargs...])')
def revset(context, mapping, args):
    """Execute a revision set query. See
    :hg:`help revset`."""
    # idiom: 'not args' instead of 'not len(args) > 0'
    if not args:
        # i18n: "revset" is a keyword
        raise error.ParseError(_("revset expects one or more arguments"))

    raw = evalstring(context, mapping, args[0])
    ctx = context.resource(mapping, 'ctx')
    repo = ctx.repo()

    def query(expr):
        # evaluate the revset expression against the repo
        m = revsetmod.match(repo.ui, expr, lookup=revsetmod.lookupfn(repo))
        return m(repo)

    if len(args) > 1:
        # extra arguments are spliced into the query via formatspec;
        # such parameterized queries are not cached
        formatargs = [evalfuncarg(context, mapping, a) for a in args[1:]]
        revs = query(revsetlang.formatspec(raw, *formatargs))
        revs = list(revs)
    else:
        # plain queries are cached per template run keyed by the raw string
        cache = context.resource(mapping, 'cache')
        revsetcache = cache.setdefault("revsetcache", {})
        if raw in revsetcache:
            revs = revsetcache[raw]
        else:
            revs = query(raw)
            revs = list(revs)
            revsetcache[raw] = revs
    return templatekw.showrevslist(context, mapping, "revision", revs)
542 542
@templatefunc('rstdoc(text, style)')
def rstdoc(context, mapping, args):
    """Format reStructuredText."""
    if len(args) != 2:
        # i18n: "rstdoc" is a keyword
        raise error.ParseError(_("rstdoc expects two arguments"))

    text = evalstring(context, mapping, args[0])
    style = evalstring(context, mapping, args[1])

    # keep=['verbose'] retains content gated behind the verbose directive
    return minirst.format(text, style=style, keep=['verbose'])
554 554
@templatefunc('separate(sep, args)', argspec='sep *args')
def separate(context, mapping, args):
    """Add a separator between non-empty arguments."""
    if 'sep' not in args:
        # i18n: "separate" is a keyword
        raise error.ParseError(_("separate expects at least one argument"))

    sep = evalstring(context, mapping, args['sep'])
    # generator: yield the separator before every non-empty argument
    # except the first
    first = True
    for arg in args['args']:
        argstr = evalstring(context, mapping, arg)
        if not argstr:
            continue
        if first:
            first = False
        else:
            yield sep
        yield argstr
573 573
@templatefunc('shortest(node, minlength=4)')
def shortest(context, mapping, args):
    """Obtain the shortest representation of
    a node."""
    if not (1 <= len(args) <= 2):
        # i18n: "shortest" is a keyword
        raise error.ParseError(_("shortest() expects one or two arguments"))

    node = evalstring(context, mapping, args[0])

    minlength = 4
    if len(args) > 1:
        minlength = evalinteger(context, mapping, args[1],
                                # i18n: "shortest" is a keyword
                                _("shortest() expects an integer minlength"))

    # shortesthexnodeidprefix() now handles repo filtering internally (the
    # unfiltered() call moved into scmutil), so pass the repo as-is.
    repo = context.resource(mapping, 'ctx')._repo
    return scmutil.shortesthexnodeidprefix(repo, node, minlength)
595 592
@templatefunc('strip(text[, chars])')
def strip(context, mapping, args):
    """Strip characters from a string. By default,
    strips all leading and trailing whitespace."""
    if not (1 <= len(args) <= 2):
        # i18n: "strip" is a keyword
        raise error.ParseError(_("strip expects one or two arguments"))

    text = evalstring(context, mapping, args[0])
    if len(args) < 2:
        # No chars argument: default whitespace strip.
        return text.strip()
    return text.strip(evalstring(context, mapping, args[1]))
609 606
@templatefunc('sub(pattern, replacement, expression)')
def sub(context, mapping, args):
    """Perform text substitution
    using regular expressions."""
    if len(args) != 3:
        # i18n: "sub" is a keyword
        raise error.ParseError(_("sub expects three arguments"))

    pat, rpl, src = [evalstring(context, mapping, a) for a in args]
    try:
        compiled = re.compile(pat)
    except re.error:
        # i18n: "sub" is a keyword
        raise error.ParseError(_("sub got an invalid pattern: %s") % pat)
    try:
        # A bad replacement (e.g. a dangling group reference) only
        # surfaces when the substitution is actually performed.
        yield compiled.sub(rpl, src)
    except re.error:
        # i18n: "sub" is a keyword
        raise error.ParseError(_("sub got an invalid replacement: %s") % rpl)
631 628
@templatefunc('startswith(pattern, text)')
def startswith(context, mapping, args):
    """Returns the value from the "text" argument
    if it begins with the content from the "pattern" argument."""
    if len(args) != 2:
        # i18n: "startswith" is a keyword
        raise error.ParseError(_("startswith expects two arguments"))

    patn = evalstring(context, mapping, args[0])
    text = evalstring(context, mapping, args[1])
    # Empty string when the text does not start with the pattern.
    return text if text.startswith(patn) else ''
645 642
@templatefunc('word(number, text[, separator])')
def word(context, mapping, args):
    """Return the nth word from a string."""
    if not (2 <= len(args) <= 3):
        # i18n: "word" is a keyword
        raise error.ParseError(_("word expects two or three arguments, got %d")
                               % len(args))

    num = evalinteger(context, mapping, args[0],
                      # i18n: "word" is a keyword
                      _("word expects an integer index"))
    text = evalstring(context, mapping, args[1])
    # A missing separator means str.split()'s default whitespace splitting.
    splitter = evalstring(context, mapping, args[2]) if len(args) == 3 else None

    tokens = text.split(splitter)
    # Both positive and negative indexes are honored; anything out of
    # range yields the empty string rather than raising.
    if -len(tokens) <= num < len(tokens):
        return tokens[num]
    return ''
668 665
def loadfunction(ui, extname, registrarobj):
    """Load template function from specified registrarobj

    Copies every name -> function entry from the registrar's table into
    this module's ``funcs`` mapping. ``ui`` and ``extname`` are part of
    the extension-loader API and are not used here.
    """
    table = registrarobj._table
    for name in table:
        funcs[name] = table[name]
674 671
# tell hggettext to extract docstrings from these functions:
i18nfunctions = funcs.values()
General Comments 0
You need to be logged in to leave comments. Login now