##// END OF EJS Templates
scmutil: make shortesthexnodeidprefix() use unfiltered repo...
Martin von Zweigbergk -
r37726:8e854161 default
parent child Browse files
Show More
@@ -1,472 +1,470 b''
1 # show.py - Extension implementing `hg show`
1 # show.py - Extension implementing `hg show`
2 #
2 #
3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """unified command to show various repository information (EXPERIMENTAL)
8 """unified command to show various repository information (EXPERIMENTAL)
9
9
10 This extension provides the :hg:`show` command, which provides a central
10 This extension provides the :hg:`show` command, which provides a central
11 command for displaying commonly-accessed repository data and views of that
11 command for displaying commonly-accessed repository data and views of that
12 data.
12 data.
13
13
14 The following config options can influence operation.
14 The following config options can influence operation.
15
15
16 ``commands``
16 ``commands``
17 ------------
17 ------------
18
18
19 ``show.aliasprefix``
19 ``show.aliasprefix``
20 List of strings that will register aliases for views. e.g. ``s`` will
20 List of strings that will register aliases for views. e.g. ``s`` will
21 effectively set config options ``alias.s<view> = show <view>`` for all
21 effectively set config options ``alias.s<view> = show <view>`` for all
22 views. i.e. `hg swork` would execute `hg show work`.
22 views. i.e. `hg swork` would execute `hg show work`.
23
23
24 Aliases that would conflict with existing registrations will not be
24 Aliases that would conflict with existing registrations will not be
25 performed.
25 performed.
26 """
26 """
27
27
28 from __future__ import absolute_import
28 from __future__ import absolute_import
29
29
30 from mercurial.i18n import _
30 from mercurial.i18n import _
31 from mercurial.node import (
31 from mercurial.node import (
32 hex,
32 hex,
33 nullrev,
33 nullrev,
34 )
34 )
35 from mercurial import (
35 from mercurial import (
36 cmdutil,
36 cmdutil,
37 commands,
37 commands,
38 destutil,
38 destutil,
39 error,
39 error,
40 formatter,
40 formatter,
41 graphmod,
41 graphmod,
42 logcmdutil,
42 logcmdutil,
43 phases,
43 phases,
44 pycompat,
44 pycompat,
45 registrar,
45 registrar,
46 revset,
46 revset,
47 revsetlang,
47 revsetlang,
48 scmutil,
48 scmutil,
49 )
49 )
50
50
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

# Command table populated by the @command decorator below.
cmdtable = {}
command = registrar.command(cmdtable)

# Registry for revset predicates defined by this extension (``_underway``).
revsetpredicate = registrar.revsetpredicate()
61
61
class showcmdfunc(registrar._funcregistrarbase):
    """Register a function to be invoked for an `hg show <thing>`."""

    # Used by _formatdoc() in the base class to combine the view name
    # with its one-line docstring when rendering the list of views.
    _docformat = '%s -- %s'

    def _extrasetup(self, name, func, fmtopic=None, csettopic=None):
        """Called with decorator arguments to register a show view.

        ``name`` is the sub-command name.

        ``func`` is the function being decorated.

        ``fmtopic`` is the topic in the style that will be rendered for
        this view.

        ``csettopic`` is the topic in the style to be used for a changeset
        printer.

        If ``fmtopic`` is specified, the view function will receive a
        formatter instance. If ``csettopic`` is specified, the view
        function will receive a changeset printer.
        """
        # Stash the topics on the function itself; show() dispatches on
        # these attributes to decide what kind of renderer to pass in.
        func._fmtopic = fmtopic
        func._csettopic = csettopic
87
87
# Decorator instance used to declare `hg show <view>` handlers; the
# registered table is consulted by show() and extsetup() below.
showview = showcmdfunc()
89
@command('show', [
    # TODO: Switch this template flag to use cmdutil.formatteropts if
    # 'hg show' becomes stable before --template/-T is stable. For now,
    # we are putting it here without the '(EXPERIMENTAL)' flag because it
    # is an important part of the 'hg show' user experience and the entire
    # 'hg show' experience is experimental.
    ('T', 'template', '', ('display with template'), _('TEMPLATE')),
    ], _('VIEW'))
def show(ui, repo, view=None, template=None):
    """show various repository information

    A requested view of repository data is displayed.

    If no view is requested, the list of available views is shown and the
    command aborts.

    .. note::

       There are no backwards compatibility guarantees for the output of this
       command. Output may change in any future Mercurial release.

       Consumers wanting stable command output should specify a template via
       ``-T/--template``.

    List of available views:
    """
    # Machine-readable consumers must pin a template; refuse to emit the
    # unstable default output under HGPLAIN.
    if ui.plain() and not template:
        hint = _('invoke with -T/--template to control output format')
        raise error.Abort(_('must specify a template in plain mode'), hint=hint)

    views = showview._table

    if not view:
        ui.pager('show')
        # TODO consider using formatter here so available views can be
        # rendered to custom format.
        ui.write(_('available views:\n'))
        ui.write('\n')

        for name, func in sorted(views.items()):
            ui.write(('%s\n') % pycompat.sysbytes(func.__doc__))

        ui.write('\n')
        raise error.Abort(_('no view requested'),
                          hint=_('use "hg show VIEW" to choose a view'))

    # TODO use same logic as dispatch to perform prefix matching.
    if view not in views:
        raise error.Abort(_('unknown view: %s') % view,
                          hint=_('run "hg show" to see available views'))

    template = template or 'show'

    fn = views[view]
    ui.pager('show')

    # Dispatch on the attributes set by showcmdfunc._extrasetup(): views
    # receive either a formatter, a changeset printer, or just (ui, repo).
    if fn._fmtopic:
        fmtopic = 'show%s' % fn._fmtopic
        with ui.formatter(fmtopic, {'template': template}) as fm:
            return fn(ui, repo, fm)
    elif fn._csettopic:
        ref = 'show%s' % fn._csettopic
        spec = formatter.lookuptemplate(ui, ref, template)
        displayer = logcmdutil.changesettemplater(ui, repo, spec, buffered=True)
        return fn(ui, repo, displayer)
    else:
        return fn(ui, repo)
157
157
@showview('bookmarks', fmtopic='bookmarks')
def showbookmarks(ui, repo, fm):
    """bookmarks and their associated changeset"""
    marks = repo._bookmarks
    if not len(marks):
        # This is a bit hacky. Ideally, templates would have a way to
        # specify an empty output, but we shouldn't corrupt JSON while
        # waiting for this functionality.
        if not isinstance(fm, formatter.jsonformatter):
            ui.write(_('(no bookmarks set)\n'))
        return

    revs = [repo[node].rev() for node in marks.values()]
    active = repo._activebookmark
    longestname = max(len(b) for b in marks)
    # Shortest unambiguous node prefix length across all bookmarked revs,
    # exposed to the template as ``nodelen`` for aligned output.
    nodelen = longestshortest(repo, revs)

    for bm, node in sorted(marks.items()):
        fm.startitem()
        fm.context(ctx=repo[node])
        fm.write('bookmark', '%s', bm)
        fm.write('node', fm.hexfunc(node), fm.hexfunc(node))
        fm.data(active=bm == active,
                longestbookmarklen=longestname,
                nodelen=nodelen)
183
183
@showview('stack', csettopic='stack')
def showstack(ui, repo, displayer):
    """current line of work"""
    wdirctx = repo['.']
    if wdirctx.rev() == nullrev:
        raise error.Abort(_('stack view only available when there is a '
                            'working directory'))

    if wdirctx.phase() == phases.public:
        ui.write(_('(empty stack; working directory parent is a published '
                   'changeset)\n'))
        return

    # TODO extract "find stack" into a function to facilitate
    # customization and reuse.

    baserev = destutil.stackbase(ui, repo)
    basectx = None

    if baserev is None:
        # No stack base found: the stack is just the working directory
        # parent itself.
        baserev = wdirctx.rev()
        stackrevs = {wdirctx.rev()}
    else:
        stackrevs = set(repo.revs('%d::.', baserev))

    ctx = repo[baserev]
    if ctx.p1().rev() != nullrev:
        basectx = ctx.p1()

    # And relevant descendants.
    branchpointattip = False
    cl = repo.changelog

    for rev in cl.descendants([wdirctx.rev()]):
        ctx = repo[rev]

        # Will only happen if . is public.
        if ctx.phase() == phases.public:
            break

        stackrevs.add(ctx.rev())

        # ctx.children() within a function iterating on descandants
        # potentially has severe performance concerns because revlog.children()
        # iterates over all revisions after ctx's node. However, the number of
        # draft changesets should be a reasonably small number. So even if
        # this is quadratic, the perf impact should be minimal.
        if len(ctx.children()) > 1:
            branchpointattip = True
            break

    # Newest first, so the stack head renders at the top.
    stackrevs = list(sorted(stackrevs, reverse=True))

    # Find likely target heads for the current stack. These are likely
    # merge or rebase targets.
    if basectx:
        # TODO make this customizable?
        newheads = set(repo.revs('heads(%d::) - %ld - not public()',
                                 basectx.rev(), stackrevs))
    else:
        newheads = set()

    allrevs = set(stackrevs) | newheads | set([baserev])
    nodelen = longestshortest(repo, allrevs)

    # Only offer `hg rebase` hints when the rebase extension is enabled.
    try:
        cmdutil.findcmd('rebase', commands.table)
        haverebase = True
    except (error.AmbiguousCommand, error.UnknownCommand):
        haverebase = False

    # TODO use templating.
    # TODO consider using graphmod. But it may not be necessary given
    # our simplicity and the customizations required.
    # TODO use proper graph symbols from graphmod

    tres = formatter.templateresources(ui, repo)
    shortesttmpl = formatter.maketemplater(ui, '{shortest(node, %d)}' % nodelen,
                                           resources=tres)
    def shortest(ctx):
        # Render a context's node as its shortest unambiguous prefix.
        return shortesttmpl.renderdefault({'ctx': ctx, 'node': ctx.hex()})

    # We write out new heads to aid in DAG awareness and to help with decision
    # making on how the stack should be reconciled with commits made since the
    # branch point.
    if newheads:
        # Calculate distance from base so we can render the count and so we can
        # sort display order by commit distance.
        revdistance = {}
        for head in newheads:
            # There is some redundancy in DAG traversal here and therefore
            # room to optimize.
            ancestors = cl.ancestors([head], stoprev=basectx.rev())
            revdistance[head] = len(list(ancestors))

        sourcectx = repo[stackrevs[-1]]

        sortedheads = sorted(newheads, key=lambda x: revdistance[x],
                             reverse=True)

        for i, rev in enumerate(sortedheads):
            ctx = repo[rev]

            if i:
                ui.write(': ')
            else:
                ui.write(' ')

            ui.write(('o '))
            displayer.show(ctx, nodelen=nodelen)
            displayer.flush(ctx)
            ui.write('\n')

            if i:
                ui.write(':/')
            else:
                ui.write(' /')

            ui.write(' (')
            ui.write(_('%d commits ahead') % revdistance[rev],
                     label='stack.commitdistance')

            if haverebase:
                # TODO may be able to omit --source in some scenarios
                ui.write('; ')
                ui.write(('hg rebase --source %s --dest %s' % (
                    shortest(sourcectx), shortest(ctx))),
                    label='stack.rebasehint')

            ui.write(')\n')

        ui.write(':\n: ')
        ui.write(_('(stack head)\n'), label='stack.label')

    if branchpointattip:
        ui.write(' \\ / ')
        ui.write(_('(multiple children)\n'), label='stack.label')
        ui.write(' |\n')

    for rev in stackrevs:
        ctx = repo[rev]
        # '@' marks the working directory parent within the stack.
        symbol = '@' if rev == wdirctx.rev() else 'o'

        if newheads:
            ui.write(': ')
        else:
            ui.write(' ')

        ui.write(symbol, ' ')
        displayer.show(ctx, nodelen=nodelen)
        displayer.flush(ctx)
        ui.write('\n')

    # TODO display histedit hint?

    if basectx:
        # Vertically and horizontally separate stack base from parent
        # to reinforce stack boundary.
        if newheads:
            ui.write(':/ ')
        else:
            ui.write(' / ')

        ui.write(_('(stack base)'), '\n', label='stack.label')
        ui.write(('o '))

        displayer.show(basectx, nodelen=nodelen)
        displayer.flush(basectx)
        ui.write('\n')
353
353
@revsetpredicate('_underway([commitage[, headage]])')
def underwayrevset(repo, subset, x):
    """Revset returning changesets that represent lines of work in progress.

    Collects mutable (non-public, non-obsolete) changesets, their parents
    for context, open heads, and the working directory parent, optionally
    limited by the ``commitage``/``headage`` date strings.
    """
    args = revset.getargsdict(x, 'underway', 'commitage headage')
    if 'commitage' not in args:
        args['commitage'] = None
    if 'headage' not in args:
        args['headage'] = None

    # We assume callers of this revset add a topographical sort on the
    # result. This means there is no benefit to making the revset lazy
    # since the topographical sort needs to consume all revs.
    #
    # With this in mind, we build up the set manually instead of constructing
    # a complex revset. This enables faster execution.

    # Mutable changesets (non-public) are the most important changesets
    # to return. ``not public()`` will also pull in obsolete changesets if
    # there is a non-obsolete changeset with obsolete ancestors. This is
    # why we exclude obsolete changesets from this query.
    rs = 'not public() and not obsolete()'
    rsargs = []
    if args['commitage']:
        rs += ' and date(%s)'
        rsargs.append(revsetlang.getstring(args['commitage'],
                                           _('commitage requires a string')))

    mutable = repo.revs(rs, *rsargs)
    relevant = revset.baseset(mutable)

    # Add parents of mutable changesets to provide context.
    relevant += repo.revs('parents(%ld)', mutable)

    # We also pull in (public) heads if they a) aren't closing a branch
    # b) are recent.
    rs = 'head() and not closed()'
    rsargs = []
    if args['headage']:
        rs += ' and date(%s)'
        rsargs.append(revsetlang.getstring(args['headage'],
                                           _('headage requires a string')))

    relevant += repo.revs(rs, *rsargs)

    # Add working directory parent.
    wdirrev = repo['.'].rev()
    if wdirrev != nullrev:
        relevant += revset.baseset({wdirrev})

    return subset & relevant
403
403
@showview('work', csettopic='work')
def showwork(ui, repo, displayer):
    """changesets that aren't finished"""
    # TODO support date-based limiting when calling revset.
    # Topo-sort keeps each line of work contiguous in the graph output.
    workrevs = repo.revs('sort(_underway(), topo)')
    shortlen = longestshortest(repo, workrevs)

    dag = graphmod.dagwalker(repo, workrevs)

    # Compact graph rendering: suppress the blank "pipe" rows between
    # changesets.
    ui.setconfig('experimental', 'graphshorten', True)
    logcmdutil.displaygraph(ui, repo, dag, displayer, graphmod.asciiedges,
                            props={'nodelen': shortlen})
416
416
def extsetup(ui):
    """Register `hg <prefix><view>` aliases for every registered view.

    For each prefix in ``commands.show.aliasprefix`` and each view in the
    show table, install an ``alias.<prefix><view> = show <view>`` config
    entry, unless the name is already taken.
    """
    for aliasprefix in ui.configlist('commands', 'show.aliasprefix'):
        for viewname in showview._table:
            aliasname = '%s%s' % (aliasprefix, viewname)

            matches, allcommands = cmdutil.findpossible(
                aliasname, commands.table, strict=True)

            # Never shadow an existing command name.
            if aliasname in matches:
                continue

            # Nor a user-defined alias.
            if ui.config('alias', aliasname, None):
                continue

            ui.setconfig('alias', aliasname, 'show %s' % viewname,
                         source='show')
435
435
def longestshortest(repo, revs, minlen=4):
    """Return the length of the longest shortest node to identify revisions.

    The result of this function can be used with the ``shortest()`` template
    function to ensure that a value is unique and unambiguous for a given
    set of nodes.

    The number of revisions in the repo is taken into account to prevent
    a numeric node prefix from conflicting with an integer revision number.
    If we fail to do this, a value of e.g. ``10023`` could mean either
    revision 10023 or node ``10023abc...``.
    """
    if not revs:
        return minlen
    # don't use filtered repo because it's slow. see templater.shortest().
    cl = repo.changelog
    # This span in the diff interleaved the pre- and post-change bodies;
    # the post-change version passes the unfiltered repo, matching the
    # changeset this diff belongs to.
    return max(len(scmutil.shortesthexnodeidprefix(repo.unfiltered(),
                                                   hex(cl.node(r)),
                                                   minlen)) for r in revs)
455
453
# Adjust the docstring of the show command so it shows all registered views.
# This is a bit hacky because it runs at the end of module load. When moved
# into core or when another extension wants to provide a view, we'll need
# to do this more robustly.
# TODO make this more robust.
def _updatedocstring():
    # Append a " <view> -- <doc>" entry for every registered view to the
    # show command's help text, left-aligned on the longest view name.
    longest = max(map(len, showview._table.keys()))
    entries = []
    for key in sorted(showview._table.keys()):
        entries.append(pycompat.sysstr(' %s %s' % (
            key.ljust(longest), showview._table[key]._origdoc)))

    cmdtable['show'][0].__doc__ = pycompat.sysstr('%s\n\n%s\n ') % (
        cmdtable['show'][0].__doc__.rstrip(),
        pycompat.sysstr('\n\n').join(entries))

_updatedocstring()
@@ -1,1552 +1,1555 b''
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import glob
11 import glob
12 import hashlib
12 import hashlib
13 import os
13 import os
14 import re
14 import re
15 import socket
15 import socket
16 import subprocess
16 import subprocess
17 import weakref
17 import weakref
18
18
19 from .i18n import _
19 from .i18n import _
20 from .node import (
20 from .node import (
21 bin,
21 bin,
22 hex,
22 hex,
23 nullid,
23 nullid,
24 short,
24 short,
25 wdirid,
25 wdirid,
26 wdirrev,
26 wdirrev,
27 )
27 )
28
28
29 from . import (
29 from . import (
30 encoding,
30 encoding,
31 error,
31 error,
32 match as matchmod,
32 match as matchmod,
33 obsolete,
33 obsolete,
34 obsutil,
34 obsutil,
35 pathutil,
35 pathutil,
36 phases,
36 phases,
37 pycompat,
37 pycompat,
38 revsetlang,
38 revsetlang,
39 similar,
39 similar,
40 url,
40 url,
41 util,
41 util,
42 vfs,
42 vfs,
43 )
43 )
44
44
45 from .utils import (
45 from .utils import (
46 procutil,
46 procutil,
47 stringutil,
47 stringutil,
48 )
48 )
49
49
50 if pycompat.iswindows:
50 if pycompat.iswindows:
51 from . import scmwindows as scmplatform
51 from . import scmwindows as scmplatform
52 else:
52 else:
53 from . import scmposix as scmplatform
53 from . import scmposix as scmplatform
54
54
55 termsize = scmplatform.termsize
55 termsize = scmplatform.termsize
56
56
57 class status(tuple):
57 class status(tuple):
58 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
58 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
59 and 'ignored' properties are only relevant to the working copy.
59 and 'ignored' properties are only relevant to the working copy.
60 '''
60 '''
61
61
62 __slots__ = ()
62 __slots__ = ()
63
63
64 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
64 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
65 clean):
65 clean):
66 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
66 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
67 ignored, clean))
67 ignored, clean))
68
68
69 @property
69 @property
70 def modified(self):
70 def modified(self):
71 '''files that have been modified'''
71 '''files that have been modified'''
72 return self[0]
72 return self[0]
73
73
74 @property
74 @property
75 def added(self):
75 def added(self):
76 '''files that have been added'''
76 '''files that have been added'''
77 return self[1]
77 return self[1]
78
78
79 @property
79 @property
80 def removed(self):
80 def removed(self):
81 '''files that have been removed'''
81 '''files that have been removed'''
82 return self[2]
82 return self[2]
83
83
84 @property
84 @property
85 def deleted(self):
85 def deleted(self):
86 '''files that are in the dirstate, but have been deleted from the
86 '''files that are in the dirstate, but have been deleted from the
87 working copy (aka "missing")
87 working copy (aka "missing")
88 '''
88 '''
89 return self[3]
89 return self[3]
90
90
91 @property
91 @property
92 def unknown(self):
92 def unknown(self):
93 '''files not in the dirstate that are not ignored'''
93 '''files not in the dirstate that are not ignored'''
94 return self[4]
94 return self[4]
95
95
96 @property
96 @property
97 def ignored(self):
97 def ignored(self):
98 '''files not in the dirstate that are ignored (by _dirignore())'''
98 '''files not in the dirstate that are ignored (by _dirignore())'''
99 return self[5]
99 return self[5]
100
100
101 @property
101 @property
102 def clean(self):
102 def clean(self):
103 '''files that have not been modified'''
103 '''files that have not been modified'''
104 return self[6]
104 return self[6]
105
105
106 def __repr__(self, *args, **kwargs):
106 def __repr__(self, *args, **kwargs):
107 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
107 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
108 'unknown=%r, ignored=%r, clean=%r>') % self)
108 'unknown=%r, ignored=%r, clean=%r>') % self)
109
109
110 def itersubrepos(ctx1, ctx2):
110 def itersubrepos(ctx1, ctx2):
111 """find subrepos in ctx1 or ctx2"""
111 """find subrepos in ctx1 or ctx2"""
112 # Create a (subpath, ctx) mapping where we prefer subpaths from
112 # Create a (subpath, ctx) mapping where we prefer subpaths from
113 # ctx1. The subpaths from ctx2 are important when the .hgsub file
113 # ctx1. The subpaths from ctx2 are important when the .hgsub file
114 # has been modified (in ctx2) but not yet committed (in ctx1).
114 # has been modified (in ctx2) but not yet committed (in ctx1).
115 subpaths = dict.fromkeys(ctx2.substate, ctx2)
115 subpaths = dict.fromkeys(ctx2.substate, ctx2)
116 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
116 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
117
117
118 missing = set()
118 missing = set()
119
119
120 for subpath in ctx2.substate:
120 for subpath in ctx2.substate:
121 if subpath not in ctx1.substate:
121 if subpath not in ctx1.substate:
122 del subpaths[subpath]
122 del subpaths[subpath]
123 missing.add(subpath)
123 missing.add(subpath)
124
124
125 for subpath, ctx in sorted(subpaths.iteritems()):
125 for subpath, ctx in sorted(subpaths.iteritems()):
126 yield subpath, ctx.sub(subpath)
126 yield subpath, ctx.sub(subpath)
127
127
128 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
128 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
129 # status and diff will have an accurate result when it does
129 # status and diff will have an accurate result when it does
130 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
130 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
131 # against itself.
131 # against itself.
132 for subpath in missing:
132 for subpath in missing:
133 yield subpath, ctx2.nullsub(subpath, ctx1)
133 yield subpath, ctx2.nullsub(subpath, ctx1)
134
134
135 def nochangesfound(ui, repo, excluded=None):
135 def nochangesfound(ui, repo, excluded=None):
136 '''Report no changes for push/pull, excluded is None or a list of
136 '''Report no changes for push/pull, excluded is None or a list of
137 nodes excluded from the push/pull.
137 nodes excluded from the push/pull.
138 '''
138 '''
139 secretlist = []
139 secretlist = []
140 if excluded:
140 if excluded:
141 for n in excluded:
141 for n in excluded:
142 ctx = repo[n]
142 ctx = repo[n]
143 if ctx.phase() >= phases.secret and not ctx.extinct():
143 if ctx.phase() >= phases.secret and not ctx.extinct():
144 secretlist.append(n)
144 secretlist.append(n)
145
145
146 if secretlist:
146 if secretlist:
147 ui.status(_("no changes found (ignored %d secret changesets)\n")
147 ui.status(_("no changes found (ignored %d secret changesets)\n")
148 % len(secretlist))
148 % len(secretlist))
149 else:
149 else:
150 ui.status(_("no changes found\n"))
150 ui.status(_("no changes found\n"))
151
151
152 def callcatch(ui, func):
152 def callcatch(ui, func):
153 """call func() with global exception handling
153 """call func() with global exception handling
154
154
155 return func() if no exception happens. otherwise do some error handling
155 return func() if no exception happens. otherwise do some error handling
156 and return an exit code accordingly. does not handle all exceptions.
156 and return an exit code accordingly. does not handle all exceptions.
157 """
157 """
158 try:
158 try:
159 try:
159 try:
160 return func()
160 return func()
161 except: # re-raises
161 except: # re-raises
162 ui.traceback()
162 ui.traceback()
163 raise
163 raise
164 # Global exception handling, alphabetically
164 # Global exception handling, alphabetically
165 # Mercurial-specific first, followed by built-in and library exceptions
165 # Mercurial-specific first, followed by built-in and library exceptions
166 except error.LockHeld as inst:
166 except error.LockHeld as inst:
167 if inst.errno == errno.ETIMEDOUT:
167 if inst.errno == errno.ETIMEDOUT:
168 reason = _('timed out waiting for lock held by %r') % inst.locker
168 reason = _('timed out waiting for lock held by %r') % inst.locker
169 else:
169 else:
170 reason = _('lock held by %r') % inst.locker
170 reason = _('lock held by %r') % inst.locker
171 ui.warn(_("abort: %s: %s\n")
171 ui.warn(_("abort: %s: %s\n")
172 % (inst.desc or stringutil.forcebytestr(inst.filename), reason))
172 % (inst.desc or stringutil.forcebytestr(inst.filename), reason))
173 if not inst.locker:
173 if not inst.locker:
174 ui.warn(_("(lock might be very busy)\n"))
174 ui.warn(_("(lock might be very busy)\n"))
175 except error.LockUnavailable as inst:
175 except error.LockUnavailable as inst:
176 ui.warn(_("abort: could not lock %s: %s\n") %
176 ui.warn(_("abort: could not lock %s: %s\n") %
177 (inst.desc or stringutil.forcebytestr(inst.filename),
177 (inst.desc or stringutil.forcebytestr(inst.filename),
178 encoding.strtolocal(inst.strerror)))
178 encoding.strtolocal(inst.strerror)))
179 except error.OutOfBandError as inst:
179 except error.OutOfBandError as inst:
180 if inst.args:
180 if inst.args:
181 msg = _("abort: remote error:\n")
181 msg = _("abort: remote error:\n")
182 else:
182 else:
183 msg = _("abort: remote error\n")
183 msg = _("abort: remote error\n")
184 ui.warn(msg)
184 ui.warn(msg)
185 if inst.args:
185 if inst.args:
186 ui.warn(''.join(inst.args))
186 ui.warn(''.join(inst.args))
187 if inst.hint:
187 if inst.hint:
188 ui.warn('(%s)\n' % inst.hint)
188 ui.warn('(%s)\n' % inst.hint)
189 except error.RepoError as inst:
189 except error.RepoError as inst:
190 ui.warn(_("abort: %s!\n") % inst)
190 ui.warn(_("abort: %s!\n") % inst)
191 if inst.hint:
191 if inst.hint:
192 ui.warn(_("(%s)\n") % inst.hint)
192 ui.warn(_("(%s)\n") % inst.hint)
193 except error.ResponseError as inst:
193 except error.ResponseError as inst:
194 ui.warn(_("abort: %s") % inst.args[0])
194 ui.warn(_("abort: %s") % inst.args[0])
195 msg = inst.args[1]
195 msg = inst.args[1]
196 if isinstance(msg, type(u'')):
196 if isinstance(msg, type(u'')):
197 msg = pycompat.sysbytes(msg)
197 msg = pycompat.sysbytes(msg)
198 if not isinstance(msg, bytes):
198 if not isinstance(msg, bytes):
199 ui.warn(" %r\n" % (msg,))
199 ui.warn(" %r\n" % (msg,))
200 elif not msg:
200 elif not msg:
201 ui.warn(_(" empty string\n"))
201 ui.warn(_(" empty string\n"))
202 else:
202 else:
203 ui.warn("\n%r\n" % stringutil.ellipsis(msg))
203 ui.warn("\n%r\n" % stringutil.ellipsis(msg))
204 except error.CensoredNodeError as inst:
204 except error.CensoredNodeError as inst:
205 ui.warn(_("abort: file censored %s!\n") % inst)
205 ui.warn(_("abort: file censored %s!\n") % inst)
206 except error.RevlogError as inst:
206 except error.RevlogError as inst:
207 ui.warn(_("abort: %s!\n") % inst)
207 ui.warn(_("abort: %s!\n") % inst)
208 except error.InterventionRequired as inst:
208 except error.InterventionRequired as inst:
209 ui.warn("%s\n" % inst)
209 ui.warn("%s\n" % inst)
210 if inst.hint:
210 if inst.hint:
211 ui.warn(_("(%s)\n") % inst.hint)
211 ui.warn(_("(%s)\n") % inst.hint)
212 return 1
212 return 1
213 except error.WdirUnsupported:
213 except error.WdirUnsupported:
214 ui.warn(_("abort: working directory revision cannot be specified\n"))
214 ui.warn(_("abort: working directory revision cannot be specified\n"))
215 except error.Abort as inst:
215 except error.Abort as inst:
216 ui.warn(_("abort: %s\n") % inst)
216 ui.warn(_("abort: %s\n") % inst)
217 if inst.hint:
217 if inst.hint:
218 ui.warn(_("(%s)\n") % inst.hint)
218 ui.warn(_("(%s)\n") % inst.hint)
219 except ImportError as inst:
219 except ImportError as inst:
220 ui.warn(_("abort: %s!\n") % stringutil.forcebytestr(inst))
220 ui.warn(_("abort: %s!\n") % stringutil.forcebytestr(inst))
221 m = stringutil.forcebytestr(inst).split()[-1]
221 m = stringutil.forcebytestr(inst).split()[-1]
222 if m in "mpatch bdiff".split():
222 if m in "mpatch bdiff".split():
223 ui.warn(_("(did you forget to compile extensions?)\n"))
223 ui.warn(_("(did you forget to compile extensions?)\n"))
224 elif m in "zlib".split():
224 elif m in "zlib".split():
225 ui.warn(_("(is your Python install correct?)\n"))
225 ui.warn(_("(is your Python install correct?)\n"))
226 except IOError as inst:
226 except IOError as inst:
227 if util.safehasattr(inst, "code"):
227 if util.safehasattr(inst, "code"):
228 ui.warn(_("abort: %s\n") % stringutil.forcebytestr(inst))
228 ui.warn(_("abort: %s\n") % stringutil.forcebytestr(inst))
229 elif util.safehasattr(inst, "reason"):
229 elif util.safehasattr(inst, "reason"):
230 try: # usually it is in the form (errno, strerror)
230 try: # usually it is in the form (errno, strerror)
231 reason = inst.reason.args[1]
231 reason = inst.reason.args[1]
232 except (AttributeError, IndexError):
232 except (AttributeError, IndexError):
233 # it might be anything, for example a string
233 # it might be anything, for example a string
234 reason = inst.reason
234 reason = inst.reason
235 if isinstance(reason, unicode):
235 if isinstance(reason, unicode):
236 # SSLError of Python 2.7.9 contains a unicode
236 # SSLError of Python 2.7.9 contains a unicode
237 reason = encoding.unitolocal(reason)
237 reason = encoding.unitolocal(reason)
238 ui.warn(_("abort: error: %s\n") % reason)
238 ui.warn(_("abort: error: %s\n") % reason)
239 elif (util.safehasattr(inst, "args")
239 elif (util.safehasattr(inst, "args")
240 and inst.args and inst.args[0] == errno.EPIPE):
240 and inst.args and inst.args[0] == errno.EPIPE):
241 pass
241 pass
242 elif getattr(inst, "strerror", None):
242 elif getattr(inst, "strerror", None):
243 if getattr(inst, "filename", None):
243 if getattr(inst, "filename", None):
244 ui.warn(_("abort: %s: %s\n") % (
244 ui.warn(_("abort: %s: %s\n") % (
245 encoding.strtolocal(inst.strerror),
245 encoding.strtolocal(inst.strerror),
246 stringutil.forcebytestr(inst.filename)))
246 stringutil.forcebytestr(inst.filename)))
247 else:
247 else:
248 ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
248 ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
249 else:
249 else:
250 raise
250 raise
251 except OSError as inst:
251 except OSError as inst:
252 if getattr(inst, "filename", None) is not None:
252 if getattr(inst, "filename", None) is not None:
253 ui.warn(_("abort: %s: '%s'\n") % (
253 ui.warn(_("abort: %s: '%s'\n") % (
254 encoding.strtolocal(inst.strerror),
254 encoding.strtolocal(inst.strerror),
255 stringutil.forcebytestr(inst.filename)))
255 stringutil.forcebytestr(inst.filename)))
256 else:
256 else:
257 ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
257 ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
258 except MemoryError:
258 except MemoryError:
259 ui.warn(_("abort: out of memory\n"))
259 ui.warn(_("abort: out of memory\n"))
260 except SystemExit as inst:
260 except SystemExit as inst:
261 # Commands shouldn't sys.exit directly, but give a return code.
261 # Commands shouldn't sys.exit directly, but give a return code.
262 # Just in case catch this and and pass exit code to caller.
262 # Just in case catch this and and pass exit code to caller.
263 return inst.code
263 return inst.code
264 except socket.error as inst:
264 except socket.error as inst:
265 ui.warn(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))
265 ui.warn(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))
266
266
267 return -1
267 return -1
268
268
269 def checknewlabel(repo, lbl, kind):
269 def checknewlabel(repo, lbl, kind):
270 # Do not use the "kind" parameter in ui output.
270 # Do not use the "kind" parameter in ui output.
271 # It makes strings difficult to translate.
271 # It makes strings difficult to translate.
272 if lbl in ['tip', '.', 'null']:
272 if lbl in ['tip', '.', 'null']:
273 raise error.Abort(_("the name '%s' is reserved") % lbl)
273 raise error.Abort(_("the name '%s' is reserved") % lbl)
274 for c in (':', '\0', '\n', '\r'):
274 for c in (':', '\0', '\n', '\r'):
275 if c in lbl:
275 if c in lbl:
276 raise error.Abort(
276 raise error.Abort(
277 _("%r cannot be used in a name") % pycompat.bytestr(c))
277 _("%r cannot be used in a name") % pycompat.bytestr(c))
278 try:
278 try:
279 int(lbl)
279 int(lbl)
280 raise error.Abort(_("cannot use an integer as a name"))
280 raise error.Abort(_("cannot use an integer as a name"))
281 except ValueError:
281 except ValueError:
282 pass
282 pass
283 if lbl.strip() != lbl:
283 if lbl.strip() != lbl:
284 raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
284 raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
285
285
286 def checkfilename(f):
286 def checkfilename(f):
287 '''Check that the filename f is an acceptable filename for a tracked file'''
287 '''Check that the filename f is an acceptable filename for a tracked file'''
288 if '\r' in f or '\n' in f:
288 if '\r' in f or '\n' in f:
289 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
289 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
290
290
291 def checkportable(ui, f):
291 def checkportable(ui, f):
292 '''Check if filename f is portable and warn or abort depending on config'''
292 '''Check if filename f is portable and warn or abort depending on config'''
293 checkfilename(f)
293 checkfilename(f)
294 abort, warn = checkportabilityalert(ui)
294 abort, warn = checkportabilityalert(ui)
295 if abort or warn:
295 if abort or warn:
296 msg = util.checkwinfilename(f)
296 msg = util.checkwinfilename(f)
297 if msg:
297 if msg:
298 msg = "%s: %s" % (msg, procutil.shellquote(f))
298 msg = "%s: %s" % (msg, procutil.shellquote(f))
299 if abort:
299 if abort:
300 raise error.Abort(msg)
300 raise error.Abort(msg)
301 ui.warn(_("warning: %s\n") % msg)
301 ui.warn(_("warning: %s\n") % msg)
302
302
303 def checkportabilityalert(ui):
303 def checkportabilityalert(ui):
304 '''check if the user's config requests nothing, a warning, or abort for
304 '''check if the user's config requests nothing, a warning, or abort for
305 non-portable filenames'''
305 non-portable filenames'''
306 val = ui.config('ui', 'portablefilenames')
306 val = ui.config('ui', 'portablefilenames')
307 lval = val.lower()
307 lval = val.lower()
308 bval = stringutil.parsebool(val)
308 bval = stringutil.parsebool(val)
309 abort = pycompat.iswindows or lval == 'abort'
309 abort = pycompat.iswindows or lval == 'abort'
310 warn = bval or lval == 'warn'
310 warn = bval or lval == 'warn'
311 if bval is None and not (warn or abort or lval == 'ignore'):
311 if bval is None and not (warn or abort or lval == 'ignore'):
312 raise error.ConfigError(
312 raise error.ConfigError(
313 _("ui.portablefilenames value is invalid ('%s')") % val)
313 _("ui.portablefilenames value is invalid ('%s')") % val)
314 return abort, warn
314 return abort, warn
315
315
316 class casecollisionauditor(object):
316 class casecollisionauditor(object):
317 def __init__(self, ui, abort, dirstate):
317 def __init__(self, ui, abort, dirstate):
318 self._ui = ui
318 self._ui = ui
319 self._abort = abort
319 self._abort = abort
320 allfiles = '\0'.join(dirstate._map)
320 allfiles = '\0'.join(dirstate._map)
321 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
321 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
322 self._dirstate = dirstate
322 self._dirstate = dirstate
323 # The purpose of _newfiles is so that we don't complain about
323 # The purpose of _newfiles is so that we don't complain about
324 # case collisions if someone were to call this object with the
324 # case collisions if someone were to call this object with the
325 # same filename twice.
325 # same filename twice.
326 self._newfiles = set()
326 self._newfiles = set()
327
327
328 def __call__(self, f):
328 def __call__(self, f):
329 if f in self._newfiles:
329 if f in self._newfiles:
330 return
330 return
331 fl = encoding.lower(f)
331 fl = encoding.lower(f)
332 if fl in self._loweredfiles and f not in self._dirstate:
332 if fl in self._loweredfiles and f not in self._dirstate:
333 msg = _('possible case-folding collision for %s') % f
333 msg = _('possible case-folding collision for %s') % f
334 if self._abort:
334 if self._abort:
335 raise error.Abort(msg)
335 raise error.Abort(msg)
336 self._ui.warn(_("warning: %s\n") % msg)
336 self._ui.warn(_("warning: %s\n") % msg)
337 self._loweredfiles.add(fl)
337 self._loweredfiles.add(fl)
338 self._newfiles.add(f)
338 self._newfiles.add(f)
339
339
340 def filteredhash(repo, maxrev):
340 def filteredhash(repo, maxrev):
341 """build hash of filtered revisions in the current repoview.
341 """build hash of filtered revisions in the current repoview.
342
342
343 Multiple caches perform up-to-date validation by checking that the
343 Multiple caches perform up-to-date validation by checking that the
344 tiprev and tipnode stored in the cache file match the current repository.
344 tiprev and tipnode stored in the cache file match the current repository.
345 However, this is not sufficient for validating repoviews because the set
345 However, this is not sufficient for validating repoviews because the set
346 of revisions in the view may change without the repository tiprev and
346 of revisions in the view may change without the repository tiprev and
347 tipnode changing.
347 tipnode changing.
348
348
349 This function hashes all the revs filtered from the view and returns
349 This function hashes all the revs filtered from the view and returns
350 that SHA-1 digest.
350 that SHA-1 digest.
351 """
351 """
352 cl = repo.changelog
352 cl = repo.changelog
353 if not cl.filteredrevs:
353 if not cl.filteredrevs:
354 return None
354 return None
355 key = None
355 key = None
356 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
356 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
357 if revs:
357 if revs:
358 s = hashlib.sha1()
358 s = hashlib.sha1()
359 for rev in revs:
359 for rev in revs:
360 s.update('%d;' % rev)
360 s.update('%d;' % rev)
361 key = s.digest()
361 key = s.digest()
362 return key
362 return key
363
363
364 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
364 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
365 '''yield every hg repository under path, always recursively.
365 '''yield every hg repository under path, always recursively.
366 The recurse flag will only control recursion into repo working dirs'''
366 The recurse flag will only control recursion into repo working dirs'''
367 def errhandler(err):
367 def errhandler(err):
368 if err.filename == path:
368 if err.filename == path:
369 raise err
369 raise err
370 samestat = getattr(os.path, 'samestat', None)
370 samestat = getattr(os.path, 'samestat', None)
371 if followsym and samestat is not None:
371 if followsym and samestat is not None:
372 def adddir(dirlst, dirname):
372 def adddir(dirlst, dirname):
373 dirstat = os.stat(dirname)
373 dirstat = os.stat(dirname)
374 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
374 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
375 if not match:
375 if not match:
376 dirlst.append(dirstat)
376 dirlst.append(dirstat)
377 return not match
377 return not match
378 else:
378 else:
379 followsym = False
379 followsym = False
380
380
381 if (seen_dirs is None) and followsym:
381 if (seen_dirs is None) and followsym:
382 seen_dirs = []
382 seen_dirs = []
383 adddir(seen_dirs, path)
383 adddir(seen_dirs, path)
384 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
384 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
385 dirs.sort()
385 dirs.sort()
386 if '.hg' in dirs:
386 if '.hg' in dirs:
387 yield root # found a repository
387 yield root # found a repository
388 qroot = os.path.join(root, '.hg', 'patches')
388 qroot = os.path.join(root, '.hg', 'patches')
389 if os.path.isdir(os.path.join(qroot, '.hg')):
389 if os.path.isdir(os.path.join(qroot, '.hg')):
390 yield qroot # we have a patch queue repo here
390 yield qroot # we have a patch queue repo here
391 if recurse:
391 if recurse:
392 # avoid recursing inside the .hg directory
392 # avoid recursing inside the .hg directory
393 dirs.remove('.hg')
393 dirs.remove('.hg')
394 else:
394 else:
395 dirs[:] = [] # don't descend further
395 dirs[:] = [] # don't descend further
396 elif followsym:
396 elif followsym:
397 newdirs = []
397 newdirs = []
398 for d in dirs:
398 for d in dirs:
399 fname = os.path.join(root, d)
399 fname = os.path.join(root, d)
400 if adddir(seen_dirs, fname):
400 if adddir(seen_dirs, fname):
401 if os.path.islink(fname):
401 if os.path.islink(fname):
402 for hgname in walkrepos(fname, True, seen_dirs):
402 for hgname in walkrepos(fname, True, seen_dirs):
403 yield hgname
403 yield hgname
404 else:
404 else:
405 newdirs.append(d)
405 newdirs.append(d)
406 dirs[:] = newdirs
406 dirs[:] = newdirs
407
407
408 def binnode(ctx):
408 def binnode(ctx):
409 """Return binary node id for a given basectx"""
409 """Return binary node id for a given basectx"""
410 node = ctx.node()
410 node = ctx.node()
411 if node is None:
411 if node is None:
412 return wdirid
412 return wdirid
413 return node
413 return node
414
414
415 def intrev(ctx):
415 def intrev(ctx):
416 """Return integer for a given basectx that can be used in comparison or
416 """Return integer for a given basectx that can be used in comparison or
417 arithmetic operation"""
417 arithmetic operation"""
418 rev = ctx.rev()
418 rev = ctx.rev()
419 if rev is None:
419 if rev is None:
420 return wdirrev
420 return wdirrev
421 return rev
421 return rev
422
422
423 def formatchangeid(ctx):
423 def formatchangeid(ctx):
424 """Format changectx as '{rev}:{node|formatnode}', which is the default
424 """Format changectx as '{rev}:{node|formatnode}', which is the default
425 template provided by logcmdutil.changesettemplater"""
425 template provided by logcmdutil.changesettemplater"""
426 repo = ctx.repo()
426 repo = ctx.repo()
427 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
427 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
428
428
429 def formatrevnode(ui, rev, node):
429 def formatrevnode(ui, rev, node):
430 """Format given revision and node depending on the current verbosity"""
430 """Format given revision and node depending on the current verbosity"""
431 if ui.debugflag:
431 if ui.debugflag:
432 hexfunc = hex
432 hexfunc = hex
433 else:
433 else:
434 hexfunc = short
434 hexfunc = short
435 return '%d:%s' % (rev, hexfunc(node))
435 return '%d:%s' % (rev, hexfunc(node))
436
436
437 def resolvehexnodeidprefix(repo, prefix):
437 def resolvehexnodeidprefix(repo, prefix):
438 # Uses unfiltered repo because it's faster when prefix is ambiguous/
438 # Uses unfiltered repo because it's faster when prefix is ambiguous/
439 # This matches the "shortest" template function.
439 # This matches the shortesthexnodeidprefix() function below.
440 node = repo.unfiltered().changelog._partialmatch(prefix)
440 node = repo.unfiltered().changelog._partialmatch(prefix)
441 if node is None:
441 if node is None:
442 return
442 return
443 repo.changelog.rev(node) # make sure node isn't filtered
443 repo.changelog.rev(node) # make sure node isn't filtered
444 return node
444 return node
445
445
446 def shortesthexnodeidprefix(repo, hexnode, minlength=1):
446 def shortesthexnodeidprefix(repo, hexnode, minlength=1):
447 """Find the shortest unambiguous prefix that matches hexnode."""
447 """Find the shortest unambiguous prefix that matches hexnode."""
448 return repo.changelog.shortest(hexnode, minlength)
448 # _partialmatch() of filtered changelog could take O(len(repo)) time,
449 # which would be unacceptably slow. so we look for hash collision in
450 # unfiltered space, which means some hashes may be slightly longer.
451 return repo.unfiltered().changelog.shortest(hexnode, minlength)
449
452
450 def isrevsymbol(repo, symbol):
453 def isrevsymbol(repo, symbol):
451 """Checks if a symbol exists in the repo.
454 """Checks if a symbol exists in the repo.
452
455
453 See revsymbol() for details. Raises error.LookupError if the symbol is an
456 See revsymbol() for details. Raises error.LookupError if the symbol is an
454 ambiguous nodeid prefix.
457 ambiguous nodeid prefix.
455 """
458 """
456 try:
459 try:
457 revsymbol(repo, symbol)
460 revsymbol(repo, symbol)
458 return True
461 return True
459 except error.RepoLookupError:
462 except error.RepoLookupError:
460 return False
463 return False
461
464
462 def revsymbol(repo, symbol):
465 def revsymbol(repo, symbol):
463 """Returns a context given a single revision symbol (as string).
466 """Returns a context given a single revision symbol (as string).
464
467
465 This is similar to revsingle(), but accepts only a single revision symbol,
468 This is similar to revsingle(), but accepts only a single revision symbol,
466 i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
469 i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
467 not "max(public())".
470 not "max(public())".
468 """
471 """
469 if not isinstance(symbol, bytes):
472 if not isinstance(symbol, bytes):
470 msg = ("symbol (%s of type %s) was not a string, did you mean "
473 msg = ("symbol (%s of type %s) was not a string, did you mean "
471 "repo[symbol]?" % (symbol, type(symbol)))
474 "repo[symbol]?" % (symbol, type(symbol)))
472 raise error.ProgrammingError(msg)
475 raise error.ProgrammingError(msg)
473 try:
476 try:
474 if symbol in ('.', 'tip', 'null'):
477 if symbol in ('.', 'tip', 'null'):
475 return repo[symbol]
478 return repo[symbol]
476
479
477 try:
480 try:
478 r = int(symbol)
481 r = int(symbol)
479 if '%d' % r != symbol:
482 if '%d' % r != symbol:
480 raise ValueError
483 raise ValueError
481 l = len(repo.changelog)
484 l = len(repo.changelog)
482 if r < 0:
485 if r < 0:
483 r += l
486 r += l
484 if r < 0 or r >= l and r != wdirrev:
487 if r < 0 or r >= l and r != wdirrev:
485 raise ValueError
488 raise ValueError
486 return repo[r]
489 return repo[r]
487 except error.FilteredIndexError:
490 except error.FilteredIndexError:
488 raise
491 raise
489 except (ValueError, OverflowError, IndexError):
492 except (ValueError, OverflowError, IndexError):
490 pass
493 pass
491
494
492 if len(symbol) == 40:
495 if len(symbol) == 40:
493 try:
496 try:
494 node = bin(symbol)
497 node = bin(symbol)
495 rev = repo.changelog.rev(node)
498 rev = repo.changelog.rev(node)
496 return repo[rev]
499 return repo[rev]
497 except error.FilteredLookupError:
500 except error.FilteredLookupError:
498 raise
501 raise
499 except (TypeError, LookupError):
502 except (TypeError, LookupError):
500 pass
503 pass
501
504
502 # look up bookmarks through the name interface
505 # look up bookmarks through the name interface
503 try:
506 try:
504 node = repo.names.singlenode(repo, symbol)
507 node = repo.names.singlenode(repo, symbol)
505 rev = repo.changelog.rev(node)
508 rev = repo.changelog.rev(node)
506 return repo[rev]
509 return repo[rev]
507 except KeyError:
510 except KeyError:
508 pass
511 pass
509
512
510 node = resolvehexnodeidprefix(repo, symbol)
513 node = resolvehexnodeidprefix(repo, symbol)
511 if node is not None:
514 if node is not None:
512 rev = repo.changelog.rev(node)
515 rev = repo.changelog.rev(node)
513 return repo[rev]
516 return repo[rev]
514
517
515 raise error.RepoLookupError(_("unknown revision '%s'") % symbol)
518 raise error.RepoLookupError(_("unknown revision '%s'") % symbol)
516
519
517 except error.WdirUnsupported:
520 except error.WdirUnsupported:
518 return repo[None]
521 return repo[None]
519 except (error.FilteredIndexError, error.FilteredLookupError,
522 except (error.FilteredIndexError, error.FilteredLookupError,
520 error.FilteredRepoLookupError):
523 error.FilteredRepoLookupError):
521 raise _filterederror(repo, symbol)
524 raise _filterederror(repo, symbol)
522
525
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    # 'visible' repoviews hide obsolete/secret changesets; for those we can
    # often explain *why* the changeset is hidden.
    if repo.filtername.startswith('visible'):

        # Check if the changeset is obsolete.  The unfiltered repo is needed
        # because the changeid, by definition, cannot be resolved through the
        # filtered view.
        unfilteredrepo = repo.unfiltered()
        ctx = revsymbol(unfilteredrepo, changeid)

        # If the changeset is obsolete, enrich the message with the reason
        # that made this changeset not visible
        if ctx.obsolete():
            msg = obsutil._getfilteredreason(repo, changeid, ctx)
        else:
            msg = _("hidden revision '%s'") % changeid

        hint = _('use --hidden to access hidden revisions')

        return error.FilteredRepoLookupError(msg, hint=hint)
    # Any other filter (e.g. 'served'): generic message, no hint.
    msg = _("filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)
547
550
def revsingle(repo, revspec, default='.', localalias=None):
    """Resolve *revspec* to a single changectx.

    Falls back to ``repo[default]`` when the spec is empty/None; the
    explicit ``!= 0`` comparison keeps revision number 0 from being
    treated as "no spec".  Aborts when the revset evaluates to nothing;
    when it yields several revisions, the last one wins.
    """
    if not revspec and revspec != 0:
        return repo[default]

    revs = revrange(repo, [revspec], localalias=localalias)
    if not revs:
        raise error.Abort(_('empty revision set'))
    return repo[revs.last()]
556
559
def _pairspec(revspec):
    """Report whether *revspec* parses to a top-level range expression."""
    parsed = revsetlang.parse(revspec)
    if not parsed:
        # Preserve the original truthiness contract: a falsy parse result
        # is returned as-is rather than coerced to bool.
        return parsed
    return parsed[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
560
563
def revpairnodes(repo, revs):
    """Deprecated node-returning variant of revpair() (kept for 4.6 compat).

    Emits a deprecation warning and returns the pair of nodes instead of
    contexts; callers should migrate to revpair().
    """
    repo.ui.deprecwarn("revpairnodes is deprecated, please use revpair", "4.6")
    ctx1, ctx2 = revpair(repo, revs)
    return ctx1.node(), ctx2.node()
565
568
def revpair(repo, revs):
    """Resolve user-supplied revision specs to a (first, second) context pair.

    With no specs, returns (working parent, working directory).  A second
    context of ``repo[None]`` (the working directory) signals "single
    revision" to callers.
    """
    if not revs:
        return repo['.'], repo[None]

    l = revrange(repo, revs)

    # Pick the endpoints cheaply when the smartset's order is known;
    # fall back to first()/last() for unordered sets.
    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        first = l.first()
        second = l.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    # Distinguish "both specs resolved to the same rev" from "one side of
    # the range was empty" by re-evaluating each spec individually.
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]
595
598
def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using
    user-specified config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    # Normalize bare integers into 'rev(N)' revset fragments; everything
    # else is assumed to already be a formatted revset string.
    allspecs = [revsetlang.formatspec('rev(%d)', spec)
                if isinstance(spec, int) else spec
                for spec in specs]
    return repo.anyrevs(allspecs, user=True, localalias=localalias)
623
626
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        # merge: both parents always shown
        return parents
    if repo.ui.debugflag:
        # debug output pads single parents with the null revision
        return [parents[0], repo['null']]
    if parents[0].rev() >= intrev(ctx) - 1:
        # linear history: the parent is implied, show nothing
        return []
    return parents
639
642
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it has already been done by sh.'''
    if not util.expandglobs:
        # posix: the shell expanded globs before we were invoked
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            # bare pattern (no 'glob:'/'re:'... prefix): try to glob it
            try:
                globbed = glob.glob(pat)
            except re.error:
                # pattern that glob cannot digest is passed through verbatim
                globbed = [pat]
            if globbed:
                ret.extend(globbed)
                continue
        # keep kind-prefixed patterns and non-matching globs unchanged
        ret.append(kindpat)
    return ret
658
661
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    # NOTE: 'bad' closes over 'm', which is bound below -- by the time the
    # matcher invokes the callback, 'm' exists.
    def bad(f, msg):
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        # an always-matcher means the patterns were effectively empty
        pats = []
    return m, pats
683
686
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    # Thin wrapper: same as matchandpats() but drops the patterns.
    return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
688
691
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always(repo.root, repo.getcwd())
692
695
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
696
699
def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.

    A plain path is canonicalized directly; an actual pattern must match
    exactly one file in the context of `rev`, otherwise ParseError(msg) is
    raised.
    """
    if not matchmod.patkind(pat):
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    else:
        ctx = repo[rev]
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
        files = [f for f in ctx if m(f)]
        if len(files) != 1:
            raise error.ParseError(msg)
        return files[0]
710
713
def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified
    '''
    origbackuppath = ui.config('ui', 'origbackuppath')
    if not origbackuppath:
        return filepath + ".orig"

    # Convert filepath from an absolute path into a path inside the repo.
    filepathfromroot = util.normpath(os.path.relpath(filepath,
                                                     start=repo.root))

    origvfs = vfs.vfs(repo.wjoin(origbackuppath))
    origbackupdir = origvfs.dirname(filepathfromroot)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        for f in reversed(list(util.finddirs(filepathfromroot))):
            if origvfs.isfileorlink(f):
                ui.note(_('removing conflicting file: %s\n')
                        % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    # A directory (that is not a symlink) at the backup location would
    # shadow the backup file; clear it out first.
    if origvfs.isdir(filepathfromroot) and not origvfs.islink(filepathfromroot):
        ui.note(_('removing conflicting directory: %s\n')
                % origvfs.join(filepathfromroot))
        origvfs.rmtree(filepathfromroot, forcibly=True)

    return origvfs.join(filepathfromroot)
746
749
747 class _containsnode(object):
750 class _containsnode(object):
748 """proxy __contains__(node) to container.__contains__ which accepts revs"""
751 """proxy __contains__(node) to container.__contains__ which accepts revs"""
749
752
750 def __init__(self, repo, revcontainer):
753 def __init__(self, repo, revcontainer):
751 self._torev = repo.changelog.rev
754 self._torev = repo.changelog.rev
752 self._revcontains = revcontainer.__contains__
755 self._revcontains = revcontainer.__contains__
753
756
754 def __contains__(self, node):
757 def __contains__(self, node):
755 return self._revcontains(self._torev(node))
758 return self._revcontains(self._torev(node))
756
759
def cleanupnodes(repo, replacements, operation, moves=None, metadata=None):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or a iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is dictionary containing metadata to be stored in obsmarker if
    obsolescence is enabled.
    """
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, 'items'):
        replacements = {n: () for n in replacements}

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()
    for oldnode, newnodes in replacements.items():
        if oldnode in moves:
            # explicit move provided by the caller wins
            continue
        if len(newnodes) > 1:
            # usually a split, take the one with biggest rev number
            newnode = next(unfi.set('max(%ln)', newnodes)).node()
        elif len(newnodes) == 0:
            # move bookmark backwards
            roots = list(unfi.set('max((::%n) - %ln)', oldnode,
                                  list(replacements)))
            if roots:
                newnode = roots[0].node()
            else:
                newnode = nullid
        else:
            newnode = newnodes[0]
        moves[oldnode] = newnode

    with repo.transaction('cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        allnewnodes = [n for ns in replacements.values() for n in ns]
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks # avoid import cycle
            repo.ui.debug('moving bookmarks %r from %s to %s\n' %
                          (util.rapply(pycompat.maybebytestr, oldbmarks),
                           hex(oldnode), hex(newnode)))
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
                                   allnewnodes, newnode, oldnode)
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obssolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the node in topology order, that might be useful for
            # some obsstore logic.
            # NOTE: the filtering and sorting might belong to createmarkers.
            isobs = unfi.obsstore.successors.__contains__
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0])
            rels = [(unfi[n], tuple(unfi[m] for m in s))
                    for n, s in sorted(replacements.items(), key=sortfunc)
                    if s or not isobs(n)]
            if rels:
                obsolete.createmarkers(repo, rels, operation=operation,
                                       metadata=metadata)
        else:
            from . import repair # avoid import cycle
            tostrip = list(replacements)
            if tostrip:
                repair.delayedstrip(repo.ui, repo, tostrip, operation)
850
853
def addremove(repo, matcher, prefix, opts=None):
    """Add new files and remove missing ones, recursing into subrepos.

    Honors opts 'dry_run', 'similarity' (0-100, rename detection threshold)
    and 'subrepos'.  Returns 1 if any path was rejected or a subrepo
    reported a problem, 0 otherwise.
    """
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get('dry_run')
    try:
        similarity = float(opts.get('similarity') or 0)
    except ValueError:
        raise error.Abort(_('similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.Abort(_('similarity must be between 0 and 100'))
    similarity /= 100.0

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    wctx = repo[None]
    # Recurse into subrepos first, so their status is reported before ours.
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            try:
                if sub.addremove(submatch, prefix, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        # only complain about explicitly-listed files; record all rejects
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                   badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    # an explicitly-requested file that was rejected is a hard failure
    for f in rejected:
        if f in m.files():
            return 1
    return ret
910
913
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    # NOTE: badfn closes over 'rejected', which is bound on the next line --
    # the callback only runs later, once the matcher is used.
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    # a rejected file that was explicitly requested means failure
    for f in rejected:
        if f in m.files():
            return 1
    return 0
939
942
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns (added, unknown, deleted, removed, forgotten) lists of repo-root
    relative paths.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
                                unknown=True, ignored=False, full=False)
    for abs, st in walkresults.iteritems():
        # classify by dirstate entry ('?' unknown, 'r' removed, 'a' added)
        # combined with whether the file exists on disk (truthy stat 'st')
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
968
971
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.

    Returns a {new: old} mapping; empty when similarity is 0 or less.'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(repo, added, removed,
                                                   similarity):
            # stay quiet about exact matches unless --verbose is set
            if (repo.ui.verbose or not matcher.exact(old)
                or not matcher.exact(new)):
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (matcher.rel(old), matcher.rel(new),
                                score * 100))
            renames[new] = old
    return renames
983
986
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    # hold the wlock so the three dirstate mutations land atomically
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in renames.iteritems():
            wctx.copy(old, new)
993
996
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    # follow an existing copy record back to the true origin, if any
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc:
        # copying back a copy: just make sure dst is tracked again
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
        return

    srcadded = repo.dirstate[origsrc] == 'a' and origsrc == src
    if srcadded:
        # the source was never committed, so there is no copy data to keep
        if not ui.quiet:
            ui.warn(_("%s has not been committed yet, so no copy "
                      "data will be stored for %s.\n")
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
        if repo.dirstate[dst] in '?r' and not dryrun:
            wctx.add([dst])
    elif not dryrun:
        wctx.copy(origsrc, dst)
1012
1015
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.

    Returns the set of requirements on success; raises
    error.RequirementError when the file is corrupt or mentions a
    feature unknown to this Mercurial.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for requirement in requirements:
        if requirement in supported:
            continue
        # an empty or non-alphanumeric entry means the file itself is
        # damaged rather than merely listing an unknown feature
        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        missings.append(requirement)
    if missings:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(sorted(missings)),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements
1031
1034
def writerequires(opener, requirements):
    """Persist ``requirements`` to the 'requires' file, sorted, one per
    line."""
    content = ''.join("%s\n" % requirement
                      for requirement in sorted(requirements))
    with opener('requires', 'w') as fp:
        fp.write(content)
1036
1039
class filecachesubentry(object):
    """Tracks the stat state of a single file for cache invalidation."""

    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        # tri-state: True/False once known, None while undetermined
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)
            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()

    def refresh(self):
        """Re-stat the file, unless it is known to be uncacheable."""
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        """Return whether stat data can be trusted for this file.

        While still undetermined, optimistically assume it can."""
        return self._cacheable if self._cacheable is not None else True

    def changed(self):
        """Return True if the file appears changed since the last stat."""
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # the first successful stat settles whether the file is
        # cacheable at all
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()
        if not self._cacheable:
            return True

        if self.cachestat == newstat:
            return False
        self.cachestat = newstat
        return True

    @staticmethod
    def stat(path):
        """Stat ``path``; return None when the file does not exist."""
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
1091
1094
class filecacheentry(object):
    """Aggregates one filecachesubentry per tracked path."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(path, stat) for path in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(entry.changed() for entry in self._entries)

    def refresh(self):
        for entry in self._entries:
            entry.refresh()
1108
1111
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates
    the object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg, so to
    ensure the cache is reliable we need the filesystem to be able to tell
    us if a file has been replaced. If it can't, we fall back to recreating
    the object on every call (essentially the same behavior as
    propertycache).
    '''
    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        # decorator protocol: remember the wrapped function and its name
        self.func = func
        self.name = func.__name__.encode('ascii')
        return self

    def __get__(self, obj, type=None):
        # accessed on the class itself: hand back the descriptor
        if obj is None:
            return self
        # fast path: the value was already materialized on the instance
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)
        if entry is None:
            paths = [self.join(obj, p) for p in self.paths]
            # We stat -before- creating the object so our cache doesn't lie
            # if a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)
        elif entry.changed():
            entry.obj = self.func(obj)

        obj._filecache[self.name] = entry
        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        ce = obj._filecache.get(self.name)
        if ce is None:
            # X in __dict__ must imply X in _filecache, so register a
            # stat-less placeholder entry for the assigned value
            paths = [self.join(obj, p) for p in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
1187
1190
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config("extdata", source)
    if not spec:
        raise error.Abort(_("unknown extdata source '%s'") % source)

    # accumulates {rev: localized value string}
    data = {}
    src = proc = None
    try:
        if spec.startswith("shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                                    close_fds=procutil.closefds,
                                    stdout=subprocess.PIPE, cwd=repo.root)
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        # each record is "<revspec>[ <value>]"
        for l in src:
            if " " in l:
                k, v = l.strip().split(" ", 1)
            else:
                k, v = l.strip(), ""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass # we ignore data for nodes that don't exist locally
    finally:
        # always reap the subprocess and close the stream, even when
        # record parsing above raised
        if proc:
            proc.communicate()
        if src:
            src.close()
    # 'cmd' is guaranteed bound here: 'proc' is only set in the shell: branch
    if proc and proc.returncode != 0:
        raise error.Abort(_("extdata command '%s' failed: %s")
                          % (cmd, procutil.explainexit(proc.returncode)))

    return data
1242
1245
1243 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1246 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1244 if lock is None:
1247 if lock is None:
1245 raise error.LockInheritanceContractViolation(
1248 raise error.LockInheritanceContractViolation(
1246 'lock can only be inherited while held')
1249 'lock can only be inherited while held')
1247 if environ is None:
1250 if environ is None:
1248 environ = {}
1251 environ = {}
1249 with lock.inherit() as locker:
1252 with lock.inherit() as locker:
1250 environ[envvar] = locker
1253 environ[envvar] = locker
1251 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1254 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1252
1255
def wlocksub(repo, cmd, *args, **kwargs):
    """Run ``cmd`` as a subprocess that may inherit the repo's wlock.

    Only valid while the wlock is held.  Accepts the same extra
    arguments as ui.system() and returns the subprocess exit code.
    """
    wlock = repo.currentwlock()
    return _locksub(repo, wlock, 'HG_WLOCK_LOCKER', cmd, *args, **kwargs)
1261
1264
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    if ui.configbool('format', 'generaldelta'):
        return True
    return ui.configbool('format', 'usegeneraldelta')
1268
1271
def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    enabled = ui.configbool('format', 'generaldelta')
    return enabled
1274
1277
class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""
    firstlinekey = '__firstline'

    def __init__(self, vfs, path, keys=None):
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of file should
        be treated as a key-value pair or reuturned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                raise error.CorruptedState(_("empty simplekeyvalue file"))
            # preserve the first line verbatim, minus its trailing '\n'
            d[self.firstlinekey] = lines[0][:-1]
            del lines[0]
        try:
            # whitespace-only lines are skipped; malformed records
            # (no '=') surface as ValueError below
            updatedict = dict(line[:-1].split('=', 1)
                              for line in lines if line.strip())
        except ValueError as e:
            raise error.CorruptedState(str(e))
        if self.firstlinekey in updatedict:
            raise error.CorruptedState(
                _("%r can't be used as a key") % self.firstlinekey)
        d.update(updatedict)
        return d

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file
        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to file before
        everything else, as it is, not in a key=value form"""
        out = []
        if firstline is not None:
            out.append('%s\n' % firstline)

        for key, value in data.items():
            if key == self.firstlinekey:
                raise error.ProgrammingError(
                    "key name '%s' is reserved" % self.firstlinekey)
            if not key[0:1].isalpha():
                raise error.ProgrammingError(
                    "keys must start with a letter in a key-value file")
            if not key.isalnum():
                raise error.ProgrammingError(
                    "invalid key name in a simple key-value file")
            if '\n' in value:
                raise error.ProgrammingError(
                    "invalid value in a simple key-value file")
            out.append("%s=%s\n" % (key, value))
        with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
            fp.write(''.join(out))
1343
1346
# Transaction-name prefixes for which obsoleted changesets are reported
# after the transaction closes (see registersummarycallback).
_reportobsoletedsource = [
    'debugobsolete',
    'pull',
    'push',
    'serve',
    'unbundle',
]

# Transaction-name prefixes for which the range of new changesets is
# reported after the transaction closes.
_reportnewcssource = [
    'pull',
    'unbundle',
]

# a list of (repo, ctx, files) functions called by various commands to allow
# extensions to ensure the corresponding files are available locally, before the
# command uses them.
fileprefetchhooks = util.hooks()

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True
1364
1367
def registersummarycallback(repo, otr, txnname=''):
    """register a callback to issue a summary after the transaction is closed

    Depending on ``txnname``, zero or more reporting callbacks are hooked
    onto ``otr`` (the outermost transaction): obsoleted changesets, newly
    unstable changesets, and the range of new changesets pulled/unbundled.
    """
    def txmatch(sources):
        # does the transaction name start with any of the given sources?
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used leading to troubles. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())
        def wrapped(tr):
            # resolve the weakref and restore the original filter level
            repo = reporef()
            if filtername:
                repo = repo.filtered(filtername)
            func(repo, tr)
        # zero-padded counter keeps callbacks firing in registration order
        newcat = '%02i-txnreport' % len(categories)
        otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    if txmatch(_reportobsoletedsource):
        @reportsummary
        def reportobsoleted(repo, tr):
            # report how many changesets this transaction obsoleted
            obsoleted = obsutil.getobsoleted(repo, tr)
            if obsoleted:
                repo.ui.status(_('obsoleted %i changesets\n')
                               % len(obsoleted))

    if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
        repo.ui.configbool('experimental', 'evolution.report-instabilities')):
        # (display name, revset name) pairs for each instability kind
        instabilitytypes = [
            ('orphan', 'orphan'),
            ('phase-divergent', 'phasedivergent'),
            ('content-divergent', 'contentdivergent'),
        ]

        def getinstabilitycounts(repo):
            # count visible unstable revisions per instability kind
            filtered = repo.changelog.filteredrevs
            counts = {}
            for instability, revset in instabilitytypes:
                counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
                                          filtered)
            return counts

        # snapshot the counts now so the callback can report the delta
        oldinstabilitycounts = getinstabilitycounts(repo)
        @reportsummary
        def reportnewinstabilities(repo, tr):
            newinstabilitycounts = getinstabilitycounts(repo)
            for instability, revset in instabilitytypes:
                delta = (newinstabilitycounts[instability] -
                         oldinstabilitycounts[instability])
                if delta > 0:
                    repo.ui.warn(_('%i new %s changesets\n') %
                                 (delta, instability))

    if txmatch(_reportnewcssource):
        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            # default to an empty range when the transaction added no revs
            newrevs = tr.changes.get('revs', xrange(0, 0))
            if not newrevs:
                return

            # Compute the bounds of new revisions' range, excluding obsoletes.
            unfi = repo.unfiltered()
            revs = unfi.revs('%ld and not obsolete()', newrevs)
            if not revs:
                # Got only obsoletes.
                return
            minrev, maxrev = repo[revs.min()], repo[revs.max()]

            if minrev == maxrev:
                revrange = minrev
            else:
                revrange = '%s:%s' % (minrev, maxrev)
            repo.ui.status(_('new changesets %s\n') % revrange)
1448
1451
def nodesummaries(repo, nodes, maxnumnodes=4):
    """Build a short human-readable summary of ``nodes``.

    All nodes are listed when there are at most ``maxnumnodes`` of them
    (or in verbose mode); otherwise the list is elided after the first
    ``maxnumnodes`` entries.
    """
    elide = len(nodes) > maxnumnodes and not repo.ui.verbose
    if not elide:
        return ' '.join(short(h) for h in nodes)
    shown = ' '.join(short(h) for h in nodes[:maxnumnodes])
    return _("%s and %d others") % (shown, len(nodes) - maxnumnodes)
1456 return _("%s and %d others") % (first, len(nodes) - maxnumnodes)
1454
1457
def enforcesinglehead(repo, tr, desc):
    """check that no named branch has multiple heads"""
    if desc in ('strip', 'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered('visible')
    # possible improvement: we could restrict the check to affected branch
    for branch, heads in visible.branchmap().iteritems():
        if len(heads) <= 1:
            continue
        msg = _('rejecting multiple heads on branch "%s"') % branch
        hint = _('%d heads: %s') % (len(heads), nodesummaries(repo, heads))
        raise error.Abort(msg, hint=hint)
1469
1472
1470 def wrapconvertsink(sink):
1473 def wrapconvertsink(sink):
1471 """Allow extensions to wrap the sink returned by convcmd.convertsink()
1474 """Allow extensions to wrap the sink returned by convcmd.convertsink()
1472 before it is used, whether or not the convert extension was formally loaded.
1475 before it is used, whether or not the convert extension was formally loaded.
1473 """
1476 """
1474 return sink
1477 return sink
1475
1478
1476 def unhidehashlikerevs(repo, specs, hiddentype):
1479 def unhidehashlikerevs(repo, specs, hiddentype):
1477 """parse the user specs and unhide changesets whose hash or revision number
1480 """parse the user specs and unhide changesets whose hash or revision number
1478 is passed.
1481 is passed.
1479
1482
1480 hiddentype can be: 1) 'warn': warn while unhiding changesets
1483 hiddentype can be: 1) 'warn': warn while unhiding changesets
1481 2) 'nowarn': don't warn while unhiding changesets
1484 2) 'nowarn': don't warn while unhiding changesets
1482
1485
1483 returns a repo object with the required changesets unhidden
1486 returns a repo object with the required changesets unhidden
1484 """
1487 """
1485 if not repo.filtername or not repo.ui.configbool('experimental',
1488 if not repo.filtername or not repo.ui.configbool('experimental',
1486 'directaccess'):
1489 'directaccess'):
1487 return repo
1490 return repo
1488
1491
1489 if repo.filtername not in ('visible', 'visible-hidden'):
1492 if repo.filtername not in ('visible', 'visible-hidden'):
1490 return repo
1493 return repo
1491
1494
1492 symbols = set()
1495 symbols = set()
1493 for spec in specs:
1496 for spec in specs:
1494 try:
1497 try:
1495 tree = revsetlang.parse(spec)
1498 tree = revsetlang.parse(spec)
1496 except error.ParseError: # will be reported by scmutil.revrange()
1499 except error.ParseError: # will be reported by scmutil.revrange()
1497 continue
1500 continue
1498
1501
1499 symbols.update(revsetlang.gethashlikesymbols(tree))
1502 symbols.update(revsetlang.gethashlikesymbols(tree))
1500
1503
1501 if not symbols:
1504 if not symbols:
1502 return repo
1505 return repo
1503
1506
1504 revs = _getrevsfromsymbols(repo, symbols)
1507 revs = _getrevsfromsymbols(repo, symbols)
1505
1508
1506 if not revs:
1509 if not revs:
1507 return repo
1510 return repo
1508
1511
1509 if hiddentype == 'warn':
1512 if hiddentype == 'warn':
1510 unfi = repo.unfiltered()
1513 unfi = repo.unfiltered()
1511 revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
1514 revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
1512 repo.ui.warn(_("warning: accessing hidden changesets for write "
1515 repo.ui.warn(_("warning: accessing hidden changesets for write "
1513 "operation: %s\n") % revstr)
1516 "operation: %s\n") % revstr)
1514
1517
1515 # we have to use new filtername to separate branch/tags cache until we can
1518 # we have to use new filtername to separate branch/tags cache until we can
1516 # disbale these cache when revisions are dynamically pinned.
1519 # disbale these cache when revisions are dynamically pinned.
1517 return repo.filtered('visible-hidden', revs)
1520 return repo.filtered('visible-hidden', revs)
1518
1521
1519 def _getrevsfromsymbols(repo, symbols):
1522 def _getrevsfromsymbols(repo, symbols):
1520 """parse the list of symbols and returns a set of revision numbers of hidden
1523 """parse the list of symbols and returns a set of revision numbers of hidden
1521 changesets present in symbols"""
1524 changesets present in symbols"""
1522 revs = set()
1525 revs = set()
1523 unfi = repo.unfiltered()
1526 unfi = repo.unfiltered()
1524 unficl = unfi.changelog
1527 unficl = unfi.changelog
1525 cl = repo.changelog
1528 cl = repo.changelog
1526 tiprev = len(unficl)
1529 tiprev = len(unficl)
1527 pmatch = unficl._partialmatch
1530 pmatch = unficl._partialmatch
1528 allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
1531 allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
1529 for s in symbols:
1532 for s in symbols:
1530 try:
1533 try:
1531 n = int(s)
1534 n = int(s)
1532 if n <= tiprev:
1535 if n <= tiprev:
1533 if not allowrevnums:
1536 if not allowrevnums:
1534 continue
1537 continue
1535 else:
1538 else:
1536 if n not in cl:
1539 if n not in cl:
1537 revs.add(n)
1540 revs.add(n)
1538 continue
1541 continue
1539 except ValueError:
1542 except ValueError:
1540 pass
1543 pass
1541
1544
1542 try:
1545 try:
1543 s = pmatch(s)
1546 s = pmatch(s)
1544 except (error.LookupError, error.WdirUnsupported):
1547 except (error.LookupError, error.WdirUnsupported):
1545 s = None
1548 s = None
1546
1549
1547 if s is not None:
1550 if s is not None:
1548 rev = unficl.rev(s)
1551 rev = unficl.rev(s)
1549 if rev not in cl:
1552 if rev not in cl:
1550 revs.add(rev)
1553 revs.add(rev)
1551
1554
1552 return revs
1555 return revs
@@ -1,676 +1,673 b''
1 # templatefuncs.py - common template functions
1 # templatefuncs.py - common template functions
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import re
10 import re
11
11
12 from .i18n import _
12 from .i18n import _
13 from . import (
13 from . import (
14 color,
14 color,
15 encoding,
15 encoding,
16 error,
16 error,
17 minirst,
17 minirst,
18 obsutil,
18 obsutil,
19 pycompat,
19 pycompat,
20 registrar,
20 registrar,
21 revset as revsetmod,
21 revset as revsetmod,
22 revsetlang,
22 revsetlang,
23 scmutil,
23 scmutil,
24 templatefilters,
24 templatefilters,
25 templatekw,
25 templatekw,
26 templateutil,
26 templateutil,
27 util,
27 util,
28 )
28 )
29 from .utils import (
29 from .utils import (
30 dateutil,
30 dateutil,
31 stringutil,
31 stringutil,
32 )
32 )
33
33
34 evalrawexp = templateutil.evalrawexp
34 evalrawexp = templateutil.evalrawexp
35 evalfuncarg = templateutil.evalfuncarg
35 evalfuncarg = templateutil.evalfuncarg
36 evalboolean = templateutil.evalboolean
36 evalboolean = templateutil.evalboolean
37 evaldate = templateutil.evaldate
37 evaldate = templateutil.evaldate
38 evalinteger = templateutil.evalinteger
38 evalinteger = templateutil.evalinteger
39 evalstring = templateutil.evalstring
39 evalstring = templateutil.evalstring
40 evalstringliteral = templateutil.evalstringliteral
40 evalstringliteral = templateutil.evalstringliteral
41
41
42 # dict of template built-in functions
42 # dict of template built-in functions
43 funcs = {}
43 funcs = {}
44 templatefunc = registrar.templatefunc(funcs)
44 templatefunc = registrar.templatefunc(funcs)
45
45
46 @templatefunc('date(date[, fmt])')
46 @templatefunc('date(date[, fmt])')
47 def date(context, mapping, args):
47 def date(context, mapping, args):
48 """Format a date. See :hg:`help dates` for formatting
48 """Format a date. See :hg:`help dates` for formatting
49 strings. The default is a Unix date format, including the timezone:
49 strings. The default is a Unix date format, including the timezone:
50 "Mon Sep 04 15:13:13 2006 0700"."""
50 "Mon Sep 04 15:13:13 2006 0700"."""
51 if not (1 <= len(args) <= 2):
51 if not (1 <= len(args) <= 2):
52 # i18n: "date" is a keyword
52 # i18n: "date" is a keyword
53 raise error.ParseError(_("date expects one or two arguments"))
53 raise error.ParseError(_("date expects one or two arguments"))
54
54
55 date = evaldate(context, mapping, args[0],
55 date = evaldate(context, mapping, args[0],
56 # i18n: "date" is a keyword
56 # i18n: "date" is a keyword
57 _("date expects a date information"))
57 _("date expects a date information"))
58 fmt = None
58 fmt = None
59 if len(args) == 2:
59 if len(args) == 2:
60 fmt = evalstring(context, mapping, args[1])
60 fmt = evalstring(context, mapping, args[1])
61 if fmt is None:
61 if fmt is None:
62 return dateutil.datestr(date)
62 return dateutil.datestr(date)
63 else:
63 else:
64 return dateutil.datestr(date, fmt)
64 return dateutil.datestr(date, fmt)
65
65
66 @templatefunc('dict([[key=]value...])', argspec='*args **kwargs')
66 @templatefunc('dict([[key=]value...])', argspec='*args **kwargs')
67 def dict_(context, mapping, args):
67 def dict_(context, mapping, args):
68 """Construct a dict from key-value pairs. A key may be omitted if
68 """Construct a dict from key-value pairs. A key may be omitted if
69 a value expression can provide an unambiguous name."""
69 a value expression can provide an unambiguous name."""
70 data = util.sortdict()
70 data = util.sortdict()
71
71
72 for v in args['args']:
72 for v in args['args']:
73 k = templateutil.findsymbolicname(v)
73 k = templateutil.findsymbolicname(v)
74 if not k:
74 if not k:
75 raise error.ParseError(_('dict key cannot be inferred'))
75 raise error.ParseError(_('dict key cannot be inferred'))
76 if k in data or k in args['kwargs']:
76 if k in data or k in args['kwargs']:
77 raise error.ParseError(_("duplicated dict key '%s' inferred") % k)
77 raise error.ParseError(_("duplicated dict key '%s' inferred") % k)
78 data[k] = evalfuncarg(context, mapping, v)
78 data[k] = evalfuncarg(context, mapping, v)
79
79
80 data.update((k, evalfuncarg(context, mapping, v))
80 data.update((k, evalfuncarg(context, mapping, v))
81 for k, v in args['kwargs'].iteritems())
81 for k, v in args['kwargs'].iteritems())
82 return templateutil.hybriddict(data)
82 return templateutil.hybriddict(data)
83
83
84 @templatefunc('diff([includepattern [, excludepattern]])')
84 @templatefunc('diff([includepattern [, excludepattern]])')
85 def diff(context, mapping, args):
85 def diff(context, mapping, args):
86 """Show a diff, optionally
86 """Show a diff, optionally
87 specifying files to include or exclude."""
87 specifying files to include or exclude."""
88 if len(args) > 2:
88 if len(args) > 2:
89 # i18n: "diff" is a keyword
89 # i18n: "diff" is a keyword
90 raise error.ParseError(_("diff expects zero, one, or two arguments"))
90 raise error.ParseError(_("diff expects zero, one, or two arguments"))
91
91
92 def getpatterns(i):
92 def getpatterns(i):
93 if i < len(args):
93 if i < len(args):
94 s = evalstring(context, mapping, args[i]).strip()
94 s = evalstring(context, mapping, args[i]).strip()
95 if s:
95 if s:
96 return [s]
96 return [s]
97 return []
97 return []
98
98
99 ctx = context.resource(mapping, 'ctx')
99 ctx = context.resource(mapping, 'ctx')
100 chunks = ctx.diff(match=ctx.match([], getpatterns(0), getpatterns(1)))
100 chunks = ctx.diff(match=ctx.match([], getpatterns(0), getpatterns(1)))
101
101
102 return ''.join(chunks)
102 return ''.join(chunks)
103
103
104 @templatefunc('extdata(source)', argspec='source')
104 @templatefunc('extdata(source)', argspec='source')
105 def extdata(context, mapping, args):
105 def extdata(context, mapping, args):
106 """Show a text read from the specified extdata source. (EXPERIMENTAL)"""
106 """Show a text read from the specified extdata source. (EXPERIMENTAL)"""
107 if 'source' not in args:
107 if 'source' not in args:
108 # i18n: "extdata" is a keyword
108 # i18n: "extdata" is a keyword
109 raise error.ParseError(_('extdata expects one argument'))
109 raise error.ParseError(_('extdata expects one argument'))
110
110
111 source = evalstring(context, mapping, args['source'])
111 source = evalstring(context, mapping, args['source'])
112 cache = context.resource(mapping, 'cache').setdefault('extdata', {})
112 cache = context.resource(mapping, 'cache').setdefault('extdata', {})
113 ctx = context.resource(mapping, 'ctx')
113 ctx = context.resource(mapping, 'ctx')
114 if source in cache:
114 if source in cache:
115 data = cache[source]
115 data = cache[source]
116 else:
116 else:
117 data = cache[source] = scmutil.extdatasource(ctx.repo(), source)
117 data = cache[source] = scmutil.extdatasource(ctx.repo(), source)
118 return data.get(ctx.rev(), '')
118 return data.get(ctx.rev(), '')
119
119
120 @templatefunc('files(pattern)')
120 @templatefunc('files(pattern)')
121 def files(context, mapping, args):
121 def files(context, mapping, args):
122 """All files of the current changeset matching the pattern. See
122 """All files of the current changeset matching the pattern. See
123 :hg:`help patterns`."""
123 :hg:`help patterns`."""
124 if not len(args) == 1:
124 if not len(args) == 1:
125 # i18n: "files" is a keyword
125 # i18n: "files" is a keyword
126 raise error.ParseError(_("files expects one argument"))
126 raise error.ParseError(_("files expects one argument"))
127
127
128 raw = evalstring(context, mapping, args[0])
128 raw = evalstring(context, mapping, args[0])
129 ctx = context.resource(mapping, 'ctx')
129 ctx = context.resource(mapping, 'ctx')
130 m = ctx.match([raw])
130 m = ctx.match([raw])
131 files = list(ctx.matches(m))
131 files = list(ctx.matches(m))
132 return templateutil.compatlist(context, mapping, "file", files)
132 return templateutil.compatlist(context, mapping, "file", files)
133
133
134 @templatefunc('fill(text[, width[, initialident[, hangindent]]])')
134 @templatefunc('fill(text[, width[, initialident[, hangindent]]])')
135 def fill(context, mapping, args):
135 def fill(context, mapping, args):
136 """Fill many
136 """Fill many
137 paragraphs with optional indentation. See the "fill" filter."""
137 paragraphs with optional indentation. See the "fill" filter."""
138 if not (1 <= len(args) <= 4):
138 if not (1 <= len(args) <= 4):
139 # i18n: "fill" is a keyword
139 # i18n: "fill" is a keyword
140 raise error.ParseError(_("fill expects one to four arguments"))
140 raise error.ParseError(_("fill expects one to four arguments"))
141
141
142 text = evalstring(context, mapping, args[0])
142 text = evalstring(context, mapping, args[0])
143 width = 76
143 width = 76
144 initindent = ''
144 initindent = ''
145 hangindent = ''
145 hangindent = ''
146 if 2 <= len(args) <= 4:
146 if 2 <= len(args) <= 4:
147 width = evalinteger(context, mapping, args[1],
147 width = evalinteger(context, mapping, args[1],
148 # i18n: "fill" is a keyword
148 # i18n: "fill" is a keyword
149 _("fill expects an integer width"))
149 _("fill expects an integer width"))
150 try:
150 try:
151 initindent = evalstring(context, mapping, args[2])
151 initindent = evalstring(context, mapping, args[2])
152 hangindent = evalstring(context, mapping, args[3])
152 hangindent = evalstring(context, mapping, args[3])
153 except IndexError:
153 except IndexError:
154 pass
154 pass
155
155
156 return templatefilters.fill(text, width, initindent, hangindent)
156 return templatefilters.fill(text, width, initindent, hangindent)
157
157
158 @templatefunc('formatnode(node)')
158 @templatefunc('formatnode(node)')
159 def formatnode(context, mapping, args):
159 def formatnode(context, mapping, args):
160 """Obtain the preferred form of a changeset hash. (DEPRECATED)"""
160 """Obtain the preferred form of a changeset hash. (DEPRECATED)"""
161 if len(args) != 1:
161 if len(args) != 1:
162 # i18n: "formatnode" is a keyword
162 # i18n: "formatnode" is a keyword
163 raise error.ParseError(_("formatnode expects one argument"))
163 raise error.ParseError(_("formatnode expects one argument"))
164
164
165 ui = context.resource(mapping, 'ui')
165 ui = context.resource(mapping, 'ui')
166 node = evalstring(context, mapping, args[0])
166 node = evalstring(context, mapping, args[0])
167 if ui.debugflag:
167 if ui.debugflag:
168 return node
168 return node
169 return templatefilters.short(node)
169 return templatefilters.short(node)
170
170
171 @templatefunc('mailmap(author)')
171 @templatefunc('mailmap(author)')
172 def mailmap(context, mapping, args):
172 def mailmap(context, mapping, args):
173 """Return the author, updated according to the value
173 """Return the author, updated according to the value
174 set in the .mailmap file"""
174 set in the .mailmap file"""
175 if len(args) != 1:
175 if len(args) != 1:
176 raise error.ParseError(_("mailmap expects one argument"))
176 raise error.ParseError(_("mailmap expects one argument"))
177
177
178 author = evalstring(context, mapping, args[0])
178 author = evalstring(context, mapping, args[0])
179
179
180 cache = context.resource(mapping, 'cache')
180 cache = context.resource(mapping, 'cache')
181 repo = context.resource(mapping, 'repo')
181 repo = context.resource(mapping, 'repo')
182
182
183 if 'mailmap' not in cache:
183 if 'mailmap' not in cache:
184 data = repo.wvfs.tryread('.mailmap')
184 data = repo.wvfs.tryread('.mailmap')
185 cache['mailmap'] = stringutil.parsemailmap(data)
185 cache['mailmap'] = stringutil.parsemailmap(data)
186
186
187 return stringutil.mapname(cache['mailmap'], author)
187 return stringutil.mapname(cache['mailmap'], author)
188
188
189 @templatefunc('pad(text, width[, fillchar=\' \'[, left=False]])',
189 @templatefunc('pad(text, width[, fillchar=\' \'[, left=False]])',
190 argspec='text width fillchar left')
190 argspec='text width fillchar left')
191 def pad(context, mapping, args):
191 def pad(context, mapping, args):
192 """Pad text with a
192 """Pad text with a
193 fill character."""
193 fill character."""
194 if 'text' not in args or 'width' not in args:
194 if 'text' not in args or 'width' not in args:
195 # i18n: "pad" is a keyword
195 # i18n: "pad" is a keyword
196 raise error.ParseError(_("pad() expects two to four arguments"))
196 raise error.ParseError(_("pad() expects two to four arguments"))
197
197
198 width = evalinteger(context, mapping, args['width'],
198 width = evalinteger(context, mapping, args['width'],
199 # i18n: "pad" is a keyword
199 # i18n: "pad" is a keyword
200 _("pad() expects an integer width"))
200 _("pad() expects an integer width"))
201
201
202 text = evalstring(context, mapping, args['text'])
202 text = evalstring(context, mapping, args['text'])
203
203
204 left = False
204 left = False
205 fillchar = ' '
205 fillchar = ' '
206 if 'fillchar' in args:
206 if 'fillchar' in args:
207 fillchar = evalstring(context, mapping, args['fillchar'])
207 fillchar = evalstring(context, mapping, args['fillchar'])
208 if len(color.stripeffects(fillchar)) != 1:
208 if len(color.stripeffects(fillchar)) != 1:
209 # i18n: "pad" is a keyword
209 # i18n: "pad" is a keyword
210 raise error.ParseError(_("pad() expects a single fill character"))
210 raise error.ParseError(_("pad() expects a single fill character"))
211 if 'left' in args:
211 if 'left' in args:
212 left = evalboolean(context, mapping, args['left'])
212 left = evalboolean(context, mapping, args['left'])
213
213
214 fillwidth = width - encoding.colwidth(color.stripeffects(text))
214 fillwidth = width - encoding.colwidth(color.stripeffects(text))
215 if fillwidth <= 0:
215 if fillwidth <= 0:
216 return text
216 return text
217 if left:
217 if left:
218 return fillchar * fillwidth + text
218 return fillchar * fillwidth + text
219 else:
219 else:
220 return text + fillchar * fillwidth
220 return text + fillchar * fillwidth
221
221
222 @templatefunc('indent(text, indentchars[, firstline])')
222 @templatefunc('indent(text, indentchars[, firstline])')
223 def indent(context, mapping, args):
223 def indent(context, mapping, args):
224 """Indents all non-empty lines
224 """Indents all non-empty lines
225 with the characters given in the indentchars string. An optional
225 with the characters given in the indentchars string. An optional
226 third parameter will override the indent for the first line only
226 third parameter will override the indent for the first line only
227 if present."""
227 if present."""
228 if not (2 <= len(args) <= 3):
228 if not (2 <= len(args) <= 3):
229 # i18n: "indent" is a keyword
229 # i18n: "indent" is a keyword
230 raise error.ParseError(_("indent() expects two or three arguments"))
230 raise error.ParseError(_("indent() expects two or three arguments"))
231
231
232 text = evalstring(context, mapping, args[0])
232 text = evalstring(context, mapping, args[0])
233 indent = evalstring(context, mapping, args[1])
233 indent = evalstring(context, mapping, args[1])
234
234
235 if len(args) == 3:
235 if len(args) == 3:
236 firstline = evalstring(context, mapping, args[2])
236 firstline = evalstring(context, mapping, args[2])
237 else:
237 else:
238 firstline = indent
238 firstline = indent
239
239
240 # the indent function doesn't indent the first line, so we do it here
240 # the indent function doesn't indent the first line, so we do it here
241 return templatefilters.indent(firstline + text, indent)
241 return templatefilters.indent(firstline + text, indent)
242
242
243 @templatefunc('get(dict, key)')
243 @templatefunc('get(dict, key)')
244 def get(context, mapping, args):
244 def get(context, mapping, args):
245 """Get an attribute/key from an object. Some keywords
245 """Get an attribute/key from an object. Some keywords
246 are complex types. This function allows you to obtain the value of an
246 are complex types. This function allows you to obtain the value of an
247 attribute on these types."""
247 attribute on these types."""
248 if len(args) != 2:
248 if len(args) != 2:
249 # i18n: "get" is a keyword
249 # i18n: "get" is a keyword
250 raise error.ParseError(_("get() expects two arguments"))
250 raise error.ParseError(_("get() expects two arguments"))
251
251
252 dictarg = evalfuncarg(context, mapping, args[0])
252 dictarg = evalfuncarg(context, mapping, args[0])
253 if not util.safehasattr(dictarg, 'get'):
253 if not util.safehasattr(dictarg, 'get'):
254 # i18n: "get" is a keyword
254 # i18n: "get" is a keyword
255 raise error.ParseError(_("get() expects a dict as first argument"))
255 raise error.ParseError(_("get() expects a dict as first argument"))
256
256
257 key = evalfuncarg(context, mapping, args[1])
257 key = evalfuncarg(context, mapping, args[1])
258 return templateutil.getdictitem(dictarg, key)
258 return templateutil.getdictitem(dictarg, key)
259
259
260 @templatefunc('if(expr, then[, else])')
260 @templatefunc('if(expr, then[, else])')
261 def if_(context, mapping, args):
261 def if_(context, mapping, args):
262 """Conditionally execute based on the result of
262 """Conditionally execute based on the result of
263 an expression."""
263 an expression."""
264 if not (2 <= len(args) <= 3):
264 if not (2 <= len(args) <= 3):
265 # i18n: "if" is a keyword
265 # i18n: "if" is a keyword
266 raise error.ParseError(_("if expects two or three arguments"))
266 raise error.ParseError(_("if expects two or three arguments"))
267
267
268 test = evalboolean(context, mapping, args[0])
268 test = evalboolean(context, mapping, args[0])
269 if test:
269 if test:
270 return evalrawexp(context, mapping, args[1])
270 return evalrawexp(context, mapping, args[1])
271 elif len(args) == 3:
271 elif len(args) == 3:
272 return evalrawexp(context, mapping, args[2])
272 return evalrawexp(context, mapping, args[2])
273
273
274 @templatefunc('ifcontains(needle, haystack, then[, else])')
274 @templatefunc('ifcontains(needle, haystack, then[, else])')
275 def ifcontains(context, mapping, args):
275 def ifcontains(context, mapping, args):
276 """Conditionally execute based
276 """Conditionally execute based
277 on whether the item "needle" is in "haystack"."""
277 on whether the item "needle" is in "haystack"."""
278 if not (3 <= len(args) <= 4):
278 if not (3 <= len(args) <= 4):
279 # i18n: "ifcontains" is a keyword
279 # i18n: "ifcontains" is a keyword
280 raise error.ParseError(_("ifcontains expects three or four arguments"))
280 raise error.ParseError(_("ifcontains expects three or four arguments"))
281
281
282 haystack = evalfuncarg(context, mapping, args[1])
282 haystack = evalfuncarg(context, mapping, args[1])
283 keytype = getattr(haystack, 'keytype', None)
283 keytype = getattr(haystack, 'keytype', None)
284 try:
284 try:
285 needle = evalrawexp(context, mapping, args[0])
285 needle = evalrawexp(context, mapping, args[0])
286 needle = templateutil.unwrapastype(context, mapping, needle,
286 needle = templateutil.unwrapastype(context, mapping, needle,
287 keytype or bytes)
287 keytype or bytes)
288 found = (needle in haystack)
288 found = (needle in haystack)
289 except error.ParseError:
289 except error.ParseError:
290 found = False
290 found = False
291
291
292 if found:
292 if found:
293 return evalrawexp(context, mapping, args[2])
293 return evalrawexp(context, mapping, args[2])
294 elif len(args) == 4:
294 elif len(args) == 4:
295 return evalrawexp(context, mapping, args[3])
295 return evalrawexp(context, mapping, args[3])
296
296
297 @templatefunc('ifeq(expr1, expr2, then[, else])')
297 @templatefunc('ifeq(expr1, expr2, then[, else])')
298 def ifeq(context, mapping, args):
298 def ifeq(context, mapping, args):
299 """Conditionally execute based on
299 """Conditionally execute based on
300 whether 2 items are equivalent."""
300 whether 2 items are equivalent."""
301 if not (3 <= len(args) <= 4):
301 if not (3 <= len(args) <= 4):
302 # i18n: "ifeq" is a keyword
302 # i18n: "ifeq" is a keyword
303 raise error.ParseError(_("ifeq expects three or four arguments"))
303 raise error.ParseError(_("ifeq expects three or four arguments"))
304
304
305 test = evalstring(context, mapping, args[0])
305 test = evalstring(context, mapping, args[0])
306 match = evalstring(context, mapping, args[1])
306 match = evalstring(context, mapping, args[1])
307 if test == match:
307 if test == match:
308 return evalrawexp(context, mapping, args[2])
308 return evalrawexp(context, mapping, args[2])
309 elif len(args) == 4:
309 elif len(args) == 4:
310 return evalrawexp(context, mapping, args[3])
310 return evalrawexp(context, mapping, args[3])
311
311
312 @templatefunc('join(list, sep)')
312 @templatefunc('join(list, sep)')
313 def join(context, mapping, args):
313 def join(context, mapping, args):
314 """Join items in a list with a delimiter."""
314 """Join items in a list with a delimiter."""
315 if not (1 <= len(args) <= 2):
315 if not (1 <= len(args) <= 2):
316 # i18n: "join" is a keyword
316 # i18n: "join" is a keyword
317 raise error.ParseError(_("join expects one or two arguments"))
317 raise error.ParseError(_("join expects one or two arguments"))
318
318
319 joinset = evalrawexp(context, mapping, args[0])
319 joinset = evalrawexp(context, mapping, args[0])
320 joiner = " "
320 joiner = " "
321 if len(args) > 1:
321 if len(args) > 1:
322 joiner = evalstring(context, mapping, args[1])
322 joiner = evalstring(context, mapping, args[1])
323 if isinstance(joinset, templateutil.wrapped):
323 if isinstance(joinset, templateutil.wrapped):
324 return joinset.join(context, mapping, joiner)
324 return joinset.join(context, mapping, joiner)
325 # TODO: perhaps a generator should be stringify()-ed here, but we can't
325 # TODO: perhaps a generator should be stringify()-ed here, but we can't
326 # because hgweb abuses it as a keyword that returns a list of dicts.
326 # because hgweb abuses it as a keyword that returns a list of dicts.
327 joinset = templateutil.unwrapvalue(context, mapping, joinset)
327 joinset = templateutil.unwrapvalue(context, mapping, joinset)
328 return templateutil.joinitems(pycompat.maybebytestr(joinset), joiner)
328 return templateutil.joinitems(pycompat.maybebytestr(joinset), joiner)
329
329
330 @templatefunc('label(label, expr)')
330 @templatefunc('label(label, expr)')
331 def label(context, mapping, args):
331 def label(context, mapping, args):
332 """Apply a label to generated content. Content with
332 """Apply a label to generated content. Content with
333 a label applied can result in additional post-processing, such as
333 a label applied can result in additional post-processing, such as
334 automatic colorization."""
334 automatic colorization."""
335 if len(args) != 2:
335 if len(args) != 2:
336 # i18n: "label" is a keyword
336 # i18n: "label" is a keyword
337 raise error.ParseError(_("label expects two arguments"))
337 raise error.ParseError(_("label expects two arguments"))
338
338
339 ui = context.resource(mapping, 'ui')
339 ui = context.resource(mapping, 'ui')
340 thing = evalstring(context, mapping, args[1])
340 thing = evalstring(context, mapping, args[1])
341 # preserve unknown symbol as literal so effects like 'red', 'bold',
341 # preserve unknown symbol as literal so effects like 'red', 'bold',
342 # etc. don't need to be quoted
342 # etc. don't need to be quoted
343 label = evalstringliteral(context, mapping, args[0])
343 label = evalstringliteral(context, mapping, args[0])
344
344
345 return ui.label(thing, label)
345 return ui.label(thing, label)
346
346
347 @templatefunc('latesttag([pattern])')
347 @templatefunc('latesttag([pattern])')
348 def latesttag(context, mapping, args):
348 def latesttag(context, mapping, args):
349 """The global tags matching the given pattern on the
349 """The global tags matching the given pattern on the
350 most recent globally tagged ancestor of this changeset.
350 most recent globally tagged ancestor of this changeset.
351 If no such tags exist, the "{tag}" template resolves to
351 If no such tags exist, the "{tag}" template resolves to
352 the string "null"."""
352 the string "null"."""
353 if len(args) > 1:
353 if len(args) > 1:
354 # i18n: "latesttag" is a keyword
354 # i18n: "latesttag" is a keyword
355 raise error.ParseError(_("latesttag expects at most one argument"))
355 raise error.ParseError(_("latesttag expects at most one argument"))
356
356
357 pattern = None
357 pattern = None
358 if len(args) == 1:
358 if len(args) == 1:
359 pattern = evalstring(context, mapping, args[0])
359 pattern = evalstring(context, mapping, args[0])
360 return templatekw.showlatesttags(context, mapping, pattern)
360 return templatekw.showlatesttags(context, mapping, pattern)
361
361
362 @templatefunc('localdate(date[, tz])')
362 @templatefunc('localdate(date[, tz])')
363 def localdate(context, mapping, args):
363 def localdate(context, mapping, args):
364 """Converts a date to the specified timezone.
364 """Converts a date to the specified timezone.
365 The default is local date."""
365 The default is local date."""
366 if not (1 <= len(args) <= 2):
366 if not (1 <= len(args) <= 2):
367 # i18n: "localdate" is a keyword
367 # i18n: "localdate" is a keyword
368 raise error.ParseError(_("localdate expects one or two arguments"))
368 raise error.ParseError(_("localdate expects one or two arguments"))
369
369
370 date = evaldate(context, mapping, args[0],
370 date = evaldate(context, mapping, args[0],
371 # i18n: "localdate" is a keyword
371 # i18n: "localdate" is a keyword
372 _("localdate expects a date information"))
372 _("localdate expects a date information"))
373 if len(args) >= 2:
373 if len(args) >= 2:
374 tzoffset = None
374 tzoffset = None
375 tz = evalfuncarg(context, mapping, args[1])
375 tz = evalfuncarg(context, mapping, args[1])
376 if isinstance(tz, bytes):
376 if isinstance(tz, bytes):
377 tzoffset, remainder = dateutil.parsetimezone(tz)
377 tzoffset, remainder = dateutil.parsetimezone(tz)
378 if remainder:
378 if remainder:
379 tzoffset = None
379 tzoffset = None
380 if tzoffset is None:
380 if tzoffset is None:
381 try:
381 try:
382 tzoffset = int(tz)
382 tzoffset = int(tz)
383 except (TypeError, ValueError):
383 except (TypeError, ValueError):
384 # i18n: "localdate" is a keyword
384 # i18n: "localdate" is a keyword
385 raise error.ParseError(_("localdate expects a timezone"))
385 raise error.ParseError(_("localdate expects a timezone"))
386 else:
386 else:
387 tzoffset = dateutil.makedate()[1]
387 tzoffset = dateutil.makedate()[1]
388 return (date[0], tzoffset)
388 return (date[0], tzoffset)
389
389
390 @templatefunc('max(iterable)')
390 @templatefunc('max(iterable)')
391 def max_(context, mapping, args, **kwargs):
391 def max_(context, mapping, args, **kwargs):
392 """Return the max of an iterable"""
392 """Return the max of an iterable"""
393 if len(args) != 1:
393 if len(args) != 1:
394 # i18n: "max" is a keyword
394 # i18n: "max" is a keyword
395 raise error.ParseError(_("max expects one argument"))
395 raise error.ParseError(_("max expects one argument"))
396
396
397 iterable = evalfuncarg(context, mapping, args[0])
397 iterable = evalfuncarg(context, mapping, args[0])
398 try:
398 try:
399 x = max(pycompat.maybebytestr(iterable))
399 x = max(pycompat.maybebytestr(iterable))
400 except (TypeError, ValueError):
400 except (TypeError, ValueError):
401 # i18n: "max" is a keyword
401 # i18n: "max" is a keyword
402 raise error.ParseError(_("max first argument should be an iterable"))
402 raise error.ParseError(_("max first argument should be an iterable"))
403 return templateutil.wraphybridvalue(iterable, x, x)
403 return templateutil.wraphybridvalue(iterable, x, x)
404
404
405 @templatefunc('min(iterable)')
405 @templatefunc('min(iterable)')
406 def min_(context, mapping, args, **kwargs):
406 def min_(context, mapping, args, **kwargs):
407 """Return the min of an iterable"""
407 """Return the min of an iterable"""
408 if len(args) != 1:
408 if len(args) != 1:
409 # i18n: "min" is a keyword
409 # i18n: "min" is a keyword
410 raise error.ParseError(_("min expects one argument"))
410 raise error.ParseError(_("min expects one argument"))
411
411
412 iterable = evalfuncarg(context, mapping, args[0])
412 iterable = evalfuncarg(context, mapping, args[0])
413 try:
413 try:
414 x = min(pycompat.maybebytestr(iterable))
414 x = min(pycompat.maybebytestr(iterable))
415 except (TypeError, ValueError):
415 except (TypeError, ValueError):
416 # i18n: "min" is a keyword
416 # i18n: "min" is a keyword
417 raise error.ParseError(_("min first argument should be an iterable"))
417 raise error.ParseError(_("min first argument should be an iterable"))
418 return templateutil.wraphybridvalue(iterable, x, x)
418 return templateutil.wraphybridvalue(iterable, x, x)
419
419
420 @templatefunc('mod(a, b)')
420 @templatefunc('mod(a, b)')
421 def mod(context, mapping, args):
421 def mod(context, mapping, args):
422 """Calculate a mod b such that a / b + a mod b == a"""
422 """Calculate a mod b such that a / b + a mod b == a"""
423 if not len(args) == 2:
423 if not len(args) == 2:
424 # i18n: "mod" is a keyword
424 # i18n: "mod" is a keyword
425 raise error.ParseError(_("mod expects two arguments"))
425 raise error.ParseError(_("mod expects two arguments"))
426
426
427 func = lambda a, b: a % b
427 func = lambda a, b: a % b
428 return templateutil.runarithmetic(context, mapping,
428 return templateutil.runarithmetic(context, mapping,
429 (func, args[0], args[1]))
429 (func, args[0], args[1]))
430
430
431 @templatefunc('obsfateoperations(markers)')
431 @templatefunc('obsfateoperations(markers)')
432 def obsfateoperations(context, mapping, args):
432 def obsfateoperations(context, mapping, args):
433 """Compute obsfate related information based on markers (EXPERIMENTAL)"""
433 """Compute obsfate related information based on markers (EXPERIMENTAL)"""
434 if len(args) != 1:
434 if len(args) != 1:
435 # i18n: "obsfateoperations" is a keyword
435 # i18n: "obsfateoperations" is a keyword
436 raise error.ParseError(_("obsfateoperations expects one argument"))
436 raise error.ParseError(_("obsfateoperations expects one argument"))
437
437
438 markers = evalfuncarg(context, mapping, args[0])
438 markers = evalfuncarg(context, mapping, args[0])
439
439
440 try:
440 try:
441 data = obsutil.markersoperations(markers)
441 data = obsutil.markersoperations(markers)
442 return templateutil.hybridlist(data, name='operation')
442 return templateutil.hybridlist(data, name='operation')
443 except (TypeError, KeyError):
443 except (TypeError, KeyError):
444 # i18n: "obsfateoperations" is a keyword
444 # i18n: "obsfateoperations" is a keyword
445 errmsg = _("obsfateoperations first argument should be an iterable")
445 errmsg = _("obsfateoperations first argument should be an iterable")
446 raise error.ParseError(errmsg)
446 raise error.ParseError(errmsg)
447
447
448 @templatefunc('obsfatedate(markers)')
448 @templatefunc('obsfatedate(markers)')
449 def obsfatedate(context, mapping, args):
449 def obsfatedate(context, mapping, args):
450 """Compute obsfate related information based on markers (EXPERIMENTAL)"""
450 """Compute obsfate related information based on markers (EXPERIMENTAL)"""
451 if len(args) != 1:
451 if len(args) != 1:
452 # i18n: "obsfatedate" is a keyword
452 # i18n: "obsfatedate" is a keyword
453 raise error.ParseError(_("obsfatedate expects one argument"))
453 raise error.ParseError(_("obsfatedate expects one argument"))
454
454
455 markers = evalfuncarg(context, mapping, args[0])
455 markers = evalfuncarg(context, mapping, args[0])
456
456
457 try:
457 try:
458 data = obsutil.markersdates(markers)
458 data = obsutil.markersdates(markers)
459 return templateutil.hybridlist(data, name='date', fmt='%d %d')
459 return templateutil.hybridlist(data, name='date', fmt='%d %d')
460 except (TypeError, KeyError):
460 except (TypeError, KeyError):
461 # i18n: "obsfatedate" is a keyword
461 # i18n: "obsfatedate" is a keyword
462 errmsg = _("obsfatedate first argument should be an iterable")
462 errmsg = _("obsfatedate first argument should be an iterable")
463 raise error.ParseError(errmsg)
463 raise error.ParseError(errmsg)
464
464
465 @templatefunc('obsfateusers(markers)')
465 @templatefunc('obsfateusers(markers)')
466 def obsfateusers(context, mapping, args):
466 def obsfateusers(context, mapping, args):
467 """Compute obsfate related information based on markers (EXPERIMENTAL)"""
467 """Compute obsfate related information based on markers (EXPERIMENTAL)"""
468 if len(args) != 1:
468 if len(args) != 1:
469 # i18n: "obsfateusers" is a keyword
469 # i18n: "obsfateusers" is a keyword
470 raise error.ParseError(_("obsfateusers expects one argument"))
470 raise error.ParseError(_("obsfateusers expects one argument"))
471
471
472 markers = evalfuncarg(context, mapping, args[0])
472 markers = evalfuncarg(context, mapping, args[0])
473
473
474 try:
474 try:
475 data = obsutil.markersusers(markers)
475 data = obsutil.markersusers(markers)
476 return templateutil.hybridlist(data, name='user')
476 return templateutil.hybridlist(data, name='user')
477 except (TypeError, KeyError, ValueError):
477 except (TypeError, KeyError, ValueError):
478 # i18n: "obsfateusers" is a keyword
478 # i18n: "obsfateusers" is a keyword
479 msg = _("obsfateusers first argument should be an iterable of "
479 msg = _("obsfateusers first argument should be an iterable of "
480 "obsmakers")
480 "obsmakers")
481 raise error.ParseError(msg)
481 raise error.ParseError(msg)
482
482
483 @templatefunc('obsfateverb(successors, markers)')
483 @templatefunc('obsfateverb(successors, markers)')
484 def obsfateverb(context, mapping, args):
484 def obsfateverb(context, mapping, args):
485 """Compute obsfate related information based on successors (EXPERIMENTAL)"""
485 """Compute obsfate related information based on successors (EXPERIMENTAL)"""
486 if len(args) != 2:
486 if len(args) != 2:
487 # i18n: "obsfateverb" is a keyword
487 # i18n: "obsfateverb" is a keyword
488 raise error.ParseError(_("obsfateverb expects two arguments"))
488 raise error.ParseError(_("obsfateverb expects two arguments"))
489
489
490 successors = evalfuncarg(context, mapping, args[0])
490 successors = evalfuncarg(context, mapping, args[0])
491 markers = evalfuncarg(context, mapping, args[1])
491 markers = evalfuncarg(context, mapping, args[1])
492
492
493 try:
493 try:
494 return obsutil.obsfateverb(successors, markers)
494 return obsutil.obsfateverb(successors, markers)
495 except TypeError:
495 except TypeError:
496 # i18n: "obsfateverb" is a keyword
496 # i18n: "obsfateverb" is a keyword
497 errmsg = _("obsfateverb first argument should be countable")
497 errmsg = _("obsfateverb first argument should be countable")
498 raise error.ParseError(errmsg)
498 raise error.ParseError(errmsg)
499
499
500 @templatefunc('relpath(path)')
500 @templatefunc('relpath(path)')
501 def relpath(context, mapping, args):
501 def relpath(context, mapping, args):
502 """Convert a repository-absolute path into a filesystem path relative to
502 """Convert a repository-absolute path into a filesystem path relative to
503 the current working directory."""
503 the current working directory."""
504 if len(args) != 1:
504 if len(args) != 1:
505 # i18n: "relpath" is a keyword
505 # i18n: "relpath" is a keyword
506 raise error.ParseError(_("relpath expects one argument"))
506 raise error.ParseError(_("relpath expects one argument"))
507
507
508 repo = context.resource(mapping, 'ctx').repo()
508 repo = context.resource(mapping, 'ctx').repo()
509 path = evalstring(context, mapping, args[0])
509 path = evalstring(context, mapping, args[0])
510 return repo.pathto(path)
510 return repo.pathto(path)
511
511
512 @templatefunc('revset(query[, formatargs...])')
512 @templatefunc('revset(query[, formatargs...])')
513 def revset(context, mapping, args):
513 def revset(context, mapping, args):
514 """Execute a revision set query. See
514 """Execute a revision set query. See
515 :hg:`help revset`."""
515 :hg:`help revset`."""
516 if not len(args) > 0:
516 if not len(args) > 0:
517 # i18n: "revset" is a keyword
517 # i18n: "revset" is a keyword
518 raise error.ParseError(_("revset expects one or more arguments"))
518 raise error.ParseError(_("revset expects one or more arguments"))
519
519
520 raw = evalstring(context, mapping, args[0])
520 raw = evalstring(context, mapping, args[0])
521 ctx = context.resource(mapping, 'ctx')
521 ctx = context.resource(mapping, 'ctx')
522 repo = ctx.repo()
522 repo = ctx.repo()
523
523
524 def query(expr):
524 def query(expr):
525 m = revsetmod.match(repo.ui, expr, lookup=revsetmod.lookupfn(repo))
525 m = revsetmod.match(repo.ui, expr, lookup=revsetmod.lookupfn(repo))
526 return m(repo)
526 return m(repo)
527
527
528 if len(args) > 1:
528 if len(args) > 1:
529 formatargs = [evalfuncarg(context, mapping, a) for a in args[1:]]
529 formatargs = [evalfuncarg(context, mapping, a) for a in args[1:]]
530 revs = query(revsetlang.formatspec(raw, *formatargs))
530 revs = query(revsetlang.formatspec(raw, *formatargs))
531 revs = list(revs)
531 revs = list(revs)
532 else:
532 else:
533 cache = context.resource(mapping, 'cache')
533 cache = context.resource(mapping, 'cache')
534 revsetcache = cache.setdefault("revsetcache", {})
534 revsetcache = cache.setdefault("revsetcache", {})
535 if raw in revsetcache:
535 if raw in revsetcache:
536 revs = revsetcache[raw]
536 revs = revsetcache[raw]
537 else:
537 else:
538 revs = query(raw)
538 revs = query(raw)
539 revs = list(revs)
539 revs = list(revs)
540 revsetcache[raw] = revs
540 revsetcache[raw] = revs
541 return templatekw.showrevslist(context, mapping, "revision", revs)
541 return templatekw.showrevslist(context, mapping, "revision", revs)
542
542
543 @templatefunc('rstdoc(text, style)')
543 @templatefunc('rstdoc(text, style)')
544 def rstdoc(context, mapping, args):
544 def rstdoc(context, mapping, args):
545 """Format reStructuredText."""
545 """Format reStructuredText."""
546 if len(args) != 2:
546 if len(args) != 2:
547 # i18n: "rstdoc" is a keyword
547 # i18n: "rstdoc" is a keyword
548 raise error.ParseError(_("rstdoc expects two arguments"))
548 raise error.ParseError(_("rstdoc expects two arguments"))
549
549
550 text = evalstring(context, mapping, args[0])
550 text = evalstring(context, mapping, args[0])
551 style = evalstring(context, mapping, args[1])
551 style = evalstring(context, mapping, args[1])
552
552
553 return minirst.format(text, style=style, keep=['verbose'])
553 return minirst.format(text, style=style, keep=['verbose'])
554
554
555 @templatefunc('separate(sep, args)', argspec='sep *args')
555 @templatefunc('separate(sep, args)', argspec='sep *args')
556 def separate(context, mapping, args):
556 def separate(context, mapping, args):
557 """Add a separator between non-empty arguments."""
557 """Add a separator between non-empty arguments."""
558 if 'sep' not in args:
558 if 'sep' not in args:
559 # i18n: "separate" is a keyword
559 # i18n: "separate" is a keyword
560 raise error.ParseError(_("separate expects at least one argument"))
560 raise error.ParseError(_("separate expects at least one argument"))
561
561
562 sep = evalstring(context, mapping, args['sep'])
562 sep = evalstring(context, mapping, args['sep'])
563 first = True
563 first = True
564 for arg in args['args']:
564 for arg in args['args']:
565 argstr = evalstring(context, mapping, arg)
565 argstr = evalstring(context, mapping, arg)
566 if not argstr:
566 if not argstr:
567 continue
567 continue
568 if first:
568 if first:
569 first = False
569 first = False
570 else:
570 else:
571 yield sep
571 yield sep
572 yield argstr
572 yield argstr
573
573
574 @templatefunc('shortest(node, minlength=4)')
574 @templatefunc('shortest(node, minlength=4)')
575 def shortest(context, mapping, args):
575 def shortest(context, mapping, args):
576 """Obtain the shortest representation of
576 """Obtain the shortest representation of
577 a node."""
577 a node."""
578 if not (1 <= len(args) <= 2):
578 if not (1 <= len(args) <= 2):
579 # i18n: "shortest" is a keyword
579 # i18n: "shortest" is a keyword
580 raise error.ParseError(_("shortest() expects one or two arguments"))
580 raise error.ParseError(_("shortest() expects one or two arguments"))
581
581
582 node = evalstring(context, mapping, args[0])
582 node = evalstring(context, mapping, args[0])
583
583
584 minlength = 4
584 minlength = 4
585 if len(args) > 1:
585 if len(args) > 1:
586 minlength = evalinteger(context, mapping, args[1],
586 minlength = evalinteger(context, mapping, args[1],
587 # i18n: "shortest" is a keyword
587 # i18n: "shortest" is a keyword
588 _("shortest() expects an integer minlength"))
588 _("shortest() expects an integer minlength"))
589
589
590 # _partialmatch() of filtered changelog could take O(len(repo)) time,
591 # which would be unacceptably slow. so we look for hash collision in
592 # unfiltered space, which means some hashes may be slightly longer.
593 repo = context.resource(mapping, 'ctx')._repo
590 repo = context.resource(mapping, 'ctx')._repo
594 return scmutil.shortesthexnodeidprefix(repo.unfiltered(), node, minlength)
591 return scmutil.shortesthexnodeidprefix(repo, node, minlength)
595
592
596 @templatefunc('strip(text[, chars])')
593 @templatefunc('strip(text[, chars])')
597 def strip(context, mapping, args):
594 def strip(context, mapping, args):
598 """Strip characters from a string. By default,
595 """Strip characters from a string. By default,
599 strips all leading and trailing whitespace."""
596 strips all leading and trailing whitespace."""
600 if not (1 <= len(args) <= 2):
597 if not (1 <= len(args) <= 2):
601 # i18n: "strip" is a keyword
598 # i18n: "strip" is a keyword
602 raise error.ParseError(_("strip expects one or two arguments"))
599 raise error.ParseError(_("strip expects one or two arguments"))
603
600
604 text = evalstring(context, mapping, args[0])
601 text = evalstring(context, mapping, args[0])
605 if len(args) == 2:
602 if len(args) == 2:
606 chars = evalstring(context, mapping, args[1])
603 chars = evalstring(context, mapping, args[1])
607 return text.strip(chars)
604 return text.strip(chars)
608 return text.strip()
605 return text.strip()
609
606
610 @templatefunc('sub(pattern, replacement, expression)')
607 @templatefunc('sub(pattern, replacement, expression)')
611 def sub(context, mapping, args):
608 def sub(context, mapping, args):
612 """Perform text substitution
609 """Perform text substitution
613 using regular expressions."""
610 using regular expressions."""
614 if len(args) != 3:
611 if len(args) != 3:
615 # i18n: "sub" is a keyword
612 # i18n: "sub" is a keyword
616 raise error.ParseError(_("sub expects three arguments"))
613 raise error.ParseError(_("sub expects three arguments"))
617
614
618 pat = evalstring(context, mapping, args[0])
615 pat = evalstring(context, mapping, args[0])
619 rpl = evalstring(context, mapping, args[1])
616 rpl = evalstring(context, mapping, args[1])
620 src = evalstring(context, mapping, args[2])
617 src = evalstring(context, mapping, args[2])
621 try:
618 try:
622 patre = re.compile(pat)
619 patre = re.compile(pat)
623 except re.error:
620 except re.error:
624 # i18n: "sub" is a keyword
621 # i18n: "sub" is a keyword
625 raise error.ParseError(_("sub got an invalid pattern: %s") % pat)
622 raise error.ParseError(_("sub got an invalid pattern: %s") % pat)
626 try:
623 try:
627 yield patre.sub(rpl, src)
624 yield patre.sub(rpl, src)
628 except re.error:
625 except re.error:
629 # i18n: "sub" is a keyword
626 # i18n: "sub" is a keyword
630 raise error.ParseError(_("sub got an invalid replacement: %s") % rpl)
627 raise error.ParseError(_("sub got an invalid replacement: %s") % rpl)
631
628
632 @templatefunc('startswith(pattern, text)')
629 @templatefunc('startswith(pattern, text)')
633 def startswith(context, mapping, args):
630 def startswith(context, mapping, args):
634 """Returns the value from the "text" argument
631 """Returns the value from the "text" argument
635 if it begins with the content from the "pattern" argument."""
632 if it begins with the content from the "pattern" argument."""
636 if len(args) != 2:
633 if len(args) != 2:
637 # i18n: "startswith" is a keyword
634 # i18n: "startswith" is a keyword
638 raise error.ParseError(_("startswith expects two arguments"))
635 raise error.ParseError(_("startswith expects two arguments"))
639
636
640 patn = evalstring(context, mapping, args[0])
637 patn = evalstring(context, mapping, args[0])
641 text = evalstring(context, mapping, args[1])
638 text = evalstring(context, mapping, args[1])
642 if text.startswith(patn):
639 if text.startswith(patn):
643 return text
640 return text
644 return ''
641 return ''
645
642
646 @templatefunc('word(number, text[, separator])')
643 @templatefunc('word(number, text[, separator])')
647 def word(context, mapping, args):
644 def word(context, mapping, args):
648 """Return the nth word from a string."""
645 """Return the nth word from a string."""
649 if not (2 <= len(args) <= 3):
646 if not (2 <= len(args) <= 3):
650 # i18n: "word" is a keyword
647 # i18n: "word" is a keyword
651 raise error.ParseError(_("word expects two or three arguments, got %d")
648 raise error.ParseError(_("word expects two or three arguments, got %d")
652 % len(args))
649 % len(args))
653
650
654 num = evalinteger(context, mapping, args[0],
651 num = evalinteger(context, mapping, args[0],
655 # i18n: "word" is a keyword
652 # i18n: "word" is a keyword
656 _("word expects an integer index"))
653 _("word expects an integer index"))
657 text = evalstring(context, mapping, args[1])
654 text = evalstring(context, mapping, args[1])
658 if len(args) == 3:
655 if len(args) == 3:
659 splitter = evalstring(context, mapping, args[2])
656 splitter = evalstring(context, mapping, args[2])
660 else:
657 else:
661 splitter = None
658 splitter = None
662
659
663 tokens = text.split(splitter)
660 tokens = text.split(splitter)
664 if num >= len(tokens) or num < -len(tokens):
661 if num >= len(tokens) or num < -len(tokens):
665 return ''
662 return ''
666 else:
663 else:
667 return tokens[num]
664 return tokens[num]
668
665
669 def loadfunction(ui, extname, registrarobj):
666 def loadfunction(ui, extname, registrarobj):
670 """Load template function from specified registrarobj
667 """Load template function from specified registrarobj
671 """
668 """
672 for name, func in registrarobj._table.iteritems():
669 for name, func in registrarobj._table.iteritems():
673 funcs[name] = func
670 funcs[name] = func
674
671
675 # tell hggettext to extract docstrings from these functions:
672 # tell hggettext to extract docstrings from these functions:
676 i18nfunctions = funcs.values()
673 i18nfunctions = funcs.values()
General Comments 0
You need to be logged in to leave comments. Login now