##// END OF EJS Templates
revset: pass in lookup function instead of repo (API)...
Yuya Nishihara -
r37913:f83cb91b default
parent child Browse files
Show More
@@ -1,1494 +1,1495 b''
1 #
1 #
2 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
2 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import copy
10 import copy
11 import mimetypes
11 import mimetypes
12 import os
12 import os
13 import re
13 import re
14
14
15 from ..i18n import _
15 from ..i18n import _
16 from ..node import hex, nullid, short
16 from ..node import hex, nullid, short
17
17
18 from .common import (
18 from .common import (
19 ErrorResponse,
19 ErrorResponse,
20 HTTP_FORBIDDEN,
20 HTTP_FORBIDDEN,
21 HTTP_NOT_FOUND,
21 HTTP_NOT_FOUND,
22 get_contact,
22 get_contact,
23 paritygen,
23 paritygen,
24 staticfile,
24 staticfile,
25 )
25 )
26
26
27 from .. import (
27 from .. import (
28 archival,
28 archival,
29 dagop,
29 dagop,
30 encoding,
30 encoding,
31 error,
31 error,
32 graphmod,
32 graphmod,
33 pycompat,
33 pycompat,
34 revset,
34 revset,
35 revsetlang,
35 revsetlang,
36 scmutil,
36 scmutil,
37 smartset,
37 smartset,
38 templater,
38 templater,
39 templateutil,
39 templateutil,
40 )
40 )
41
41
42 from ..utils import (
42 from ..utils import (
43 stringutil,
43 stringutil,
44 )
44 )
45
45
46 from . import (
46 from . import (
47 webutil,
47 webutil,
48 )
48 )
49
49
# Registry populated by the @webcommand decorator below: ``__all__`` lists
# the registered command names, and ``commands`` maps each name to its
# handler function.
__all__ = []
commands = {}
52
52
class webcommand(object):
    """Class-based decorator that registers a hgweb command handler.

    Instantiated with the name/path the command should be accessible
    under; calling the resulting instance on a handler function records
    that function in the module-level ``commands`` table, exports its
    name via ``__all__``, and hands the function back unchanged.

    Registered handlers receive a ``requestcontext``, ``wsgirequest``,
    and a templater instance for generating output. They populate the
    ``rctx.res`` object with details about the HTTP response and return
    a generator to be consumed by the WSGI application — for most
    commands the result of ``web.res.sendresponse()``; many call
    ``web.sendtemplate()`` to render a template.

    Usage:

    @webcommand('mycommand')
    def mycommand(web):
        pass
    """

    def __init__(self, name):
        # Name/path under which the command will be reachable.
        self.name = name

    def __call__(self, func):
        # Export the name and register the handler, then return the
        # function unchanged so it remains directly callable.
        __all__.append(self.name)
        commands[self.name] = func
        return func
83
83
@webcommand('log')
def log(web):
    """
    /log[/{revision}[/{path}]]
    --------------------------

    Show repository or file history.

    For URLs of the form ``/log/{revision}``, a list of changesets starting at
    the specified changeset identifier is shown. If ``{revision}`` is not
    defined, the default is ``tip``. This form is equivalent to the
    ``changelog`` handler.

    For URLs of the form ``/log/{revision}/{file}``, the history for a specific
    file will be shown. This form is equivalent to the ``filelog`` handler.
    """

    # Dispatch on the presence of a 'file' query parameter: file history
    # is delegated to the filelog handler, everything else to changelog.
    if web.req.qsparams.get('file'):
        return filelog(web)
    return changelog(web)
105
105
@webcommand('rawfile')
def rawfile(web):
    # Serve the raw contents of a file. Falls back to the manifest
    # handler when no path is given or the path resolves to a directory.
    guessmime = web.configbool('web', 'guessmime')

    path = webutil.cleanpath(web.repo, web.req.qsparams.get('file', ''))
    if not path:
        return manifest(web)

    try:
        fctx = webutil.filectx(web.repo, web.req)
    except error.LookupError as inst:
        try:
            # The path may name a directory; try rendering it as one.
            return manifest(web)
        except ErrorResponse:
            # Not a directory either: surface the original lookup error.
            raise inst

    path = fctx.path()
    text = fctx.data()
    # Default to an opaque binary type unless MIME guessing is enabled.
    mt = 'application/binary'
    if guessmime:
        mt = mimetypes.guess_type(path)[0]
        if mt is None:
            # No extension-based guess: sniff the content instead.
            if stringutil.binary(text):
                mt = 'application/binary'
            else:
                mt = 'text/plain'
    if mt.startswith('text/'):
        mt += '; charset="%s"' % encoding.encoding

    web.res.headers['Content-Type'] = mt
    # Escape backslashes and double quotes so the basename can be safely
    # embedded in the quoted-string filename parameter of the header.
    filename = (path.rpartition('/')[-1]
                .replace('\\', '\\\\').replace('"', '\\"'))
    web.res.headers['Content-Disposition'] = 'inline; filename="%s"' % filename
    web.res.setbodybytes(text)
    return web.res.sendresponse()
141
141
def _filerevision(web, fctx):
    # Render the ``filerevision`` template for a single file revision.
    f = fctx.path()
    text = fctx.data()
    parity = paritygen(web.stripecount)
    # Whether this file revision is a head of its filelog.
    ishead = fctx.filerev() in fctx.filelog().headrevs()

    if stringutil.binary(text):
        # Don't dump raw binary data into the page; substitute a short
        # placeholder carrying the guessed MIME type.
        mt = mimetypes.guess_type(f)[0] or 'application/octet-stream'
        text = '(binary:%s)' % mt

    def lines():
        # Lazily emit one template mapping per line of the file,
        # alternating row parity for display striping.
        for lineno, t in enumerate(text.splitlines(True)):
            yield {"line": t,
                   "lineid": "l%d" % (lineno + 1),
                   "linenumber": "% 6d" % (lineno + 1),
                   "parity": next(parity)}

    return web.sendtemplate(
        'filerevision',
        file=f,
        path=webutil.up(f),
        text=lines(),
        symrev=webutil.symrevorshortnode(web.req, fctx),
        rename=webutil.renamelink(fctx),
        permissions=fctx.manifest().flags(f),
        ishead=int(ishead),
        **pycompat.strkwargs(webutil.commonentry(web.repo, fctx)))
169
169
@webcommand('file')
def file(web):
    """
    /file/{revision}[/{path}]
    -------------------------

    Show information about a directory or file in the repository.

    Info about the ``path`` given as a URL parameter will be rendered.

    If ``path`` is a directory, information about the entries in that
    directory will be rendered. This form is equivalent to the ``manifest``
    handler.

    If ``path`` is a file, information about that file will be shown via
    the ``filerevision`` template.

    If ``path`` is not defined, information about the root directory will
    be rendered.
    """
    # Raw style requests bypass templating entirely.
    if web.req.qsparams.get('style') == 'raw':
        return rawfile(web)

    path = webutil.cleanpath(web.repo, web.req.qsparams.get('file', ''))
    if not path:
        return manifest(web)

    try:
        return _filerevision(web, webutil.filectx(web.repo, web.req))
    except error.LookupError as err:
        # The path may actually be a directory; retry as a manifest
        # listing, and if that fails too report the original error.
        try:
            return manifest(web)
        except ErrorResponse:
            raise err
203
203
def _search(web):
    # Backend for the search form of the changelog handler: classify the
    # query into a search mode, run the matching search generator, and
    # render the results via the ``search`` template.
    MODE_REVISION = 'rev'
    MODE_KEYWORD = 'keyword'
    MODE_REVSET = 'revset'

    def revsearch(ctx):
        # Exact revision: the result set is just that one changeset.
        yield ctx

    def keywordsearch(query):
        lower = encoding.lower
        qw = lower(query).split()

        def revgen():
            # Walk the changelog backwards from tip in batches of 100
            # revisions, yielding changesets newest-first within each
            # batch.
            cl = web.repo.changelog
            for i in xrange(len(web.repo) - 1, 0, -100):
                l = []
                for j in cl.revs(max(0, i - 99), i):
                    ctx = web.repo[j]
                    l.append(ctx)
                l.reverse()
                for e in l:
                    yield e

        for ctx in revgen():
            miss = 0
            # Every keyword must appear in the user, the description or
            # the file list for the changeset to match.
            for q in qw:
                if not (q in lower(ctx.user()) or
                        q in lower(ctx.description()) or
                        q in lower(" ".join(ctx.files()))):
                    miss = 1
                    break
            if miss:
                continue

            yield ctx

    def revsetsearch(revs):
        for r in revs:
            yield web.repo[r]

    # mode -> (search generator, human-readable mode description)
    searchfuncs = {
        MODE_REVISION: (revsearch, 'exact revision search'),
        MODE_KEYWORD: (keywordsearch, 'literal keyword search'),
        MODE_REVSET: (revsetsearch, 'revset expression search'),
    }

    def getsearchmode(query):
        # Returns (mode, argument-for-that-mode's-search-function).
        try:
            ctx = scmutil.revsymbol(web.repo, query)
        except (error.RepoError, error.LookupError):
            # query is not an exact revision pointer, need to
            # decide if it's a revset expression or keywords
            pass
        else:
            return MODE_REVISION, ctx

        revdef = 'reverse(%s)' % query
        try:
            tree = revsetlang.parse(revdef)
        except error.ParseError:
            # can't parse to a revset tree
            return MODE_KEYWORD, query

        if revsetlang.depth(tree) <= 2:
            # no revset syntax used
            return MODE_KEYWORD, query

        # Treat queries containing 're:' patterns as keywords; regex
        # revsets could be expensive on a public server.
        if any((token, (value or '')[:3]) == ('string', 're:')
               for token, value, pos in revsetlang.tokenize(revdef)):
            return MODE_KEYWORD, query

        # Only allow revset functions considered safe for web use.
        funcsused = revsetlang.funcsused(tree)
        if not funcsused.issubset(revset.safesymbols):
            return MODE_KEYWORD, query

        mfunc = revset.match(web.repo.ui, revdef,
                             lookup=revset.lookupfn(web.repo))
        try:
            revs = mfunc(web.repo)
            return MODE_REVSET, revs
        # ParseError: wrongly placed tokens, wrongs arguments, etc
        # RepoLookupError: no such revision, e.g. in 'revision:'
        # Abort: bookmark/tag not exists
        # LookupError: ambiguous identifier, e.g. in '(bc)' on a large repo
        except (error.ParseError, error.RepoLookupError, error.Abort,
                LookupError):
            return MODE_KEYWORD, query

    def changelist(context):
        # Template generator producing up to ``revcount`` search entries.
        # Closes over ``searchfunc``, ``funcarg``, ``revcount`` and
        # ``parity``, which are assigned below before rendering.
        count = 0

        for ctx in searchfunc[0](funcarg):
            count += 1
            n = ctx.node()
            showtags = webutil.showtag(web.repo, web.tmpl, 'changelogtag', n)
            files = webutil.listfilediffs(web.tmpl, ctx.files(), n,
                                          web.maxfiles)

            lm = webutil.commonentry(web.repo, ctx)
            lm.update({
                'parity': next(parity),
                'changelogtag': showtags,
                'files': files,
            })
            yield lm

            if count >= revcount:
                break

    query = web.req.qsparams['rev']
    revcount = web.maxchanges
    if 'revcount' in web.req.qsparams:
        # Clamp user-supplied revcount to at least 1; ignore junk.
        try:
            revcount = int(web.req.qsparams.get('revcount', revcount))
            revcount = max(revcount, 1)
            web.tmpl.defaults['sessionvars']['revcount'] = revcount
        except ValueError:
            pass

    # Session variables for the "less"/"more" pagination links.
    lessvars = copy.copy(web.tmpl.defaults['sessionvars'])
    lessvars['revcount'] = max(revcount // 2, 1)
    lessvars['rev'] = query
    morevars = copy.copy(web.tmpl.defaults['sessionvars'])
    morevars['revcount'] = revcount * 2
    morevars['rev'] = query

    mode, funcarg = getsearchmode(query)

    if 'forcekw' in web.req.qsparams:
        # The user explicitly asked for a literal keyword search,
        # overriding whatever mode was detected.
        showforcekw = ''
        showunforcekw = searchfuncs[mode][1]
        mode = MODE_KEYWORD
        funcarg = query
    else:
        if mode != MODE_KEYWORD:
            showforcekw = searchfuncs[MODE_KEYWORD][1]
        else:
            showforcekw = ''
        showunforcekw = ''

    searchfunc = searchfuncs[mode]

    tip = web.repo['tip']
    parity = paritygen(web.stripecount)

    return web.sendtemplate(
        'search',
        query=query,
        node=tip.hex(),
        symrev='tip',
        entries=templateutil.mappinggenerator(changelist, name='searchentry'),
        archives=web.archivelist('tip'),
        morevars=morevars,
        lessvars=lessvars,
        modedesc=searchfunc[1],
        showforcekw=showforcekw,
        showunforcekw=showunforcekw)
360
361
@webcommand('changelog')
def changelog(web, shortlog=False):
    """
    /changelog[/{revision}]
    -----------------------

    Show information about multiple changesets.

    If the optional ``revision`` URL argument is absent, information about
    all changesets starting at ``tip`` will be rendered. If the ``revision``
    argument is present, changesets will be shown starting from the specified
    revision.

    If ``revision`` is absent, the ``rev`` query string argument may be
    defined. This will perform a search for changesets.

    The argument for ``rev`` can be a single revision, a revision set,
    or a literal keyword to search for in changeset data (equivalent to
    :hg:`log -k`).

    The ``revcount`` query string argument defines the maximum numbers of
    changesets to render.

    For non-searches, the ``changelog`` template will be rendered.
    """

    query = ''
    if 'node' in web.req.qsparams:
        ctx = webutil.changectx(web.repo, web.req)
        symrev = webutil.symrevorshortnode(web.req, ctx)
    elif 'rev' in web.req.qsparams:
        # A 'rev' query argument means this is a search, not a listing.
        return _search(web)
    else:
        ctx = web.repo['tip']
        symrev = 'tip'

    def changelist():
        # Template generator walking the changelog from ``pos`` down to
        # revision 0, yielding at most revcount + 1 entries (the extra
        # one is used below to detect whether a "next" page exists).
        # Closes over ``pos``, ``revcount`` and ``parity``, assigned
        # below before the generator is consumed.
        revs = []
        if pos != -1:
            revs = web.repo.changelog.revs(pos, 0)
        curcount = 0
        for rev in revs:
            curcount += 1
            if curcount > revcount + 1:
                break

            entry = webutil.changelistentry(web, web.repo[rev])
            entry['parity'] = next(parity)
            yield entry

    if shortlog:
        revcount = web.maxshortchanges
    else:
        revcount = web.maxchanges

    if 'revcount' in web.req.qsparams:
        # Clamp user-supplied revcount to at least 1; ignore junk.
        try:
            revcount = int(web.req.qsparams.get('revcount', revcount))
            revcount = max(revcount, 1)
            web.tmpl.defaults['sessionvars']['revcount'] = revcount
        except ValueError:
            pass

    # Session variables for the "less"/"more" pagination links.
    lessvars = copy.copy(web.tmpl.defaults['sessionvars'])
    lessvars['revcount'] = max(revcount // 2, 1)
    morevars = copy.copy(web.tmpl.defaults['sessionvars'])
    morevars['revcount'] = revcount * 2

    count = len(web.repo)
    pos = ctx.rev()
    parity = paritygen(web.stripecount)

    changenav = webutil.revnav(web.repo).gen(pos, revcount, count)

    entries = list(changelist())
    latestentry = entries[:1]
    if len(entries) > revcount:
        # The extra entry only signals that another page exists; don't
        # display it on this one.
        nextentry = entries[-1:]
        entries = entries[:-1]
    else:
        nextentry = []

    return web.sendtemplate(
        'shortlog' if shortlog else 'changelog',
        changenav=changenav,
        node=ctx.hex(),
        rev=pos,
        symrev=symrev,
        changesets=count,
        entries=entries,
        latestentry=latestentry,
        nextentry=nextentry,
        archives=web.archivelist('tip'),
        revcount=revcount,
        morevars=morevars,
        lessvars=lessvars,
        query=query)
458
459
@webcommand('shortlog')
def shortlog(web):
    """
    /shortlog
    ---------

    Show basic information about a set of changesets.

    This accepts the same parameters as the ``changelog`` handler. The only
    difference is the ``shortlog`` template will be rendered instead of the
    ``changelog`` template.
    """
    # Thin wrapper: reuse the changelog handler in shortlog mode.
    return changelog(web, shortlog=True)
472
473
@webcommand('changeset')
def changeset(web):
    """
    /changeset[/{revision}]
    -----------------------

    Show information about a single changeset.

    A URL path argument is the changeset identifier to show. See ``hg help
    revisions`` for possible values. If not defined, the ``tip`` changeset
    will be shown.

    The ``changeset`` template is rendered. Contents of the ``changesettag``,
    ``changesetbookmark``, ``filenodelink``, ``filenolink``, and the many
    templates related to diffs may all be used to produce the output.
    """
    ctx = webutil.changectx(web.repo, web.req)
    # Build the full template keyword mapping for this changeset, then
    # hand it to the template engine.
    mapping = webutil.changesetentry(web, ctx)
    return web.sendtemplate('changeset', **mapping)

# The same handler is also exposed under the shorter 'rev' path.
rev = webcommand('rev')(changeset)
496
497
def decodepath(path):
    """Hook for mapping a path in the repository to a path in the
    working copy.

    The default implementation is the identity function. Extensions
    (e.g., largefiles) can override this to remap files in the virtual
    file system presented by the manifest command below."""
    return path
504
505
@webcommand('manifest')
def manifest(web):
    """
    /manifest[/{revision}[/{path}]]
    -------------------------------

    Show information about a directory.

    If the URL path arguments are omitted, information about the root
    directory for the ``tip`` changeset will be shown.

    Because this handler can only show information for directories, it
    is recommended to use the ``file`` handler instead, as it can handle both
    directories and files.

    The ``manifest`` template will be rendered for this handler.
    """
    if 'node' in web.req.qsparams:
        ctx = webutil.changectx(web.repo, web.req)
        symrev = webutil.symrevorshortnode(web.req, ctx)
    else:
        ctx = web.repo['tip']
        symrev = 'tip'
    path = webutil.cleanpath(web.repo, web.req.qsparams.get('file', ''))
    mf = ctx.manifest()
    node = ctx.node()

    # Direct children of ``path``: plain files and subdirectories.
    files = {}
    dirs = {}
    parity = paritygen(web.stripecount)

    # Normalize to a trailing-slash directory prefix for matching.
    if path and path[-1:] != "/":
        path += "/"
    l = len(path)
    abspath = "/" + path

    for full, n in mf.iteritems():
        # the virtual path (working copy path) used for the full
        # (repository) path
        f = decodepath(full)

        if f[:l] != path:
            continue
        remain = f[l:]
        elements = remain.split('/')
        if len(elements) == 1:
            files[remain] = full
        else:
            # Record the nested directory structure as a dict tree; a
            # None key marks a level that directly contains files.
            h = dirs # need to retain ref to dirs (root)
            for elem in elements[0:-1]:
                if elem not in h:
                    h[elem] = {}
                h = h[elem]
                if len(h) > 1:
                    break
            h[None] = None # denotes files present

    if mf and not files and not dirs:
        raise ErrorResponse(HTTP_NOT_FOUND, 'path not found: ' + path)

    def filelist(**map):
        # Template generator for the files directly inside ``path``.
        for f in sorted(files):
            full = files[f]

            fctx = ctx.filectx(full)
            yield {"file": full,
                   "parity": next(parity),
                   "basename": f,
                   "date": fctx.date(),
                   "size": fctx.size(),
                   "permissions": mf.flags(full)}

    def dirlist(**map):
        # Template generator for the subdirectories of ``path``,
        # collapsing chains of single-entry directories into one row.
        for d in sorted(dirs):

            emptydirs = []
            h = dirs[d]
            while isinstance(h, dict) and len(h) == 1:
                k, v = next(iter(h.items()))
                if v:
                    emptydirs.append(k)
                h = v

            path = "%s%s" % (abspath, d)
            yield {"parity": next(parity),
                   "path": path,
                   "emptydirs": "/".join(emptydirs),
                   "basename": d}

    return web.sendtemplate(
        'manifest',
        symrev=symrev,
        path=abspath,
        up=webutil.up(abspath),
        upparity=next(parity),
        fentries=filelist,
        dentries=dirlist,
        archives=web.archivelist(hex(node)),
        **pycompat.strkwargs(webutil.commonentry(web.repo, ctx)))
604
605
@webcommand('tags')
def tags(web):
    """
    /tags
    -----

    Show information about tags.

    No arguments are accepted.

    The ``tags`` template is rendered.
    """
    # newest tag first; "tip" is always present in this list
    taglist = list(reversed(web.repo.tagslist()))
    parity = paritygen(web.stripecount)

    def entries(notip, latestonly, **map):
        # Select which tags to emit: optionally drop "tip", optionally
        # keep only the most recent entry.
        selected = taglist
        if notip:
            selected = [(name, node) for name, node in taglist
                        if name != "tip"]
        if latestonly:
            selected = selected[:1]
        for name, node in selected:
            yield {"parity": next(parity),
                   "tag": name,
                   "date": web.repo[node].date(),
                   "node": hex(node)}

    return web.sendtemplate(
        'tags',
        node=hex(web.repo.changelog.tip()),
        entries=lambda **x: entries(False, False, **x),
        entriesnotip=lambda **x: entries(True, False, **x),
        latestentry=lambda **x: entries(True, True, **x))
638
639
@webcommand('bookmarks')
def bookmarks(web):
    """
    /bookmarks
    ----------

    Show information about bookmarks.

    No arguments are accepted.

    The ``bookmarks`` template is rendered.
    """
    # Keep only bookmarks whose target changeset is visible in this repo
    # view, newest revision first (name breaks ties).
    marks = [b for b in web.repo._bookmarks.items() if b[1] in web.repo]
    marks.sort(key=lambda b: (web.repo[b[1]].rev(), b[0]), reverse=True)
    parity = paritygen(web.stripecount)

    def entries(latestonly, **map):
        if latestonly:
            visible = marks[:1]
        else:
            visible = marks
        for name, node in visible:
            yield {"parity": next(parity),
                   "bookmark": name,
                   "date": web.repo[node].date(),
                   "node": hex(node)}

    # -1 (nullrev) stands in for "no bookmarks" so lastchange still renders
    if marks:
        latestrev = marks[0][1]
    else:
        latestrev = -1

    return web.sendtemplate(
        'bookmarks',
        node=hex(web.repo.changelog.tip()),
        lastchange=[{'date': web.repo[latestrev].date()}],
        entries=lambda **x: entries(latestonly=False, **x),
        latestentry=lambda **x: entries(latestonly=True, **x))
677
678
@webcommand('branches')
def branches(web):
    """
    /branches
    ---------

    Show information about branches.

    All known branches are contained in the output, even closed branches.

    No arguments are accepted.

    The ``branches`` template is rendered.
    """
    # Full listing plus a one-entry listing for the "latest" slot.
    allentries = webutil.branchentries(web.repo, web.stripecount)
    newest = webutil.branchentries(web.repo, web.stripecount, 1)

    return web.sendtemplate(
        'branches',
        node=hex(web.repo.changelog.tip()),
        entries=allentries,
        latestentry=newest)
700
701
@webcommand('summary')
def summary(web):
    """
    /summary
    --------

    Show a summary of repository state.

    Information about the latest changesets, bookmarks, tags, and branches
    is captured by this handler.

    The ``summary`` template is rendered.
    """
    tagiter = reversed(web.repo.tagslist())

    def tagentries(context):
        parity = paritygen(web.stripecount)
        shown = 0
        for name, node in tagiter:
            if name == "tip": # skip tip
                continue

            shown += 1
            if shown > 10: # limit to 10 tags
                break

            yield {
                'parity': next(parity),
                'tag': name,
                'node': hex(node),
                'date': web.repo[node].date(),
            }

    def bookmarks(**map):
        parity = paritygen(web.stripecount)
        marks = [b for b in web.repo._bookmarks.items() if b[1] in web.repo]
        marks.sort(key=lambda b: (web.repo[b[1]].rev(), b[0]), reverse=True)
        for name, node in marks[:10]:  # limit to 10 bookmarks
            yield {'parity': next(parity),
                   'bookmark': name,
                   'date': web.repo[node].date(),
                   'node': hex(node)}

    def changelist(context):
        parity = paritygen(web.stripecount, offset=start - end)
        entriesbuf = [] # build a list in forward order for efficiency
        revs = []
        if start < end:
            revs = web.repo.changelog.revs(start, end - 1)
        for rev in revs:
            ctx = web.repo[rev]
            entry = webutil.commonentry(web.repo, ctx)
            entry['parity'] = next(parity)
            entriesbuf.append(entry)

        # template wants newest first
        for entry in reversed(entriesbuf):
            yield entry

    tip = web.repo['tip']
    count = len(web.repo)
    start = max(0, count - web.maxchanges)
    end = min(count, start + web.maxchanges)

    desc = web.config("web", "description")
    if not desc:
        desc = 'unknown'
    labels = web.configlist('web', 'labels')

    return web.sendtemplate(
        'summary',
        desc=desc,
        owner=get_contact(web.config) or 'unknown',
        lastchange=tip.date(),
        tags=templateutil.mappinggenerator(tagentries, name='tagentry'),
        bookmarks=bookmarks,
        branches=webutil.branchentries(web.repo, web.stripecount, 10),
        shortlog=templateutil.mappinggenerator(changelist,
                                               name='shortlogentry'),
        node=tip.hex(),
        symrev='tip',
        archives=web.archivelist('tip'),
        labels=templateutil.hybridlist(labels, name='label'))
784
785
@webcommand('filediff')
def filediff(web):
    """
    /diff/{revision}/{path}
    -----------------------

    Show how a file changed in a particular commit.

    The ``filediff`` template is rendered.

    This handler is registered under both the ``/diff`` and ``/filediff``
    paths. ``/diff`` is used in modern code.
    """
    fctx, ctx = None, None
    try:
        fctx = webutil.filectx(web.repo, web.req)
    except LookupError:
        # The file may have been removed in this changeset; fall back to
        # the changeset context as long as the path was touched by it.
        ctx = webutil.changectx(web.repo, web.req)
        path = webutil.cleanpath(web.repo, web.req.qsparams['file'])
        if path not in ctx.files():
            raise

    if fctx is not None:
        path = fctx.path()
        ctx = fctx.changectx()
    basectx = ctx.p1()

    style = web.config('web', 'style')
    if 'style' in web.req.qsparams:
        style = web.req.qsparams['style']

    diffs = webutil.diffs(web, ctx, basectx, [path], style)
    if fctx is not None:
        rename = webutil.renamelink(fctx)
        # render the entry from the file context when we have one
        ctx = fctx
    else:
        # removed file: no rename info, keep the changeset context
        # (the original had a redundant "ctx = ctx" here)
        rename = []

    return web.sendtemplate(
        'filediff',
        file=path,
        symrev=webutil.symrevorshortnode(web.req, ctx),
        rename=rename,
        diff=diffs,
        **pycompat.strkwargs(webutil.commonentry(web.repo, ctx)))

diff = webcommand('diff')(filediff)
833
834
@webcommand('comparison')
def comparison(web):
    """
    /comparison/{revision}/{path}
    -----------------------------

    Show a comparison between the old and new versions of a file from changes
    made on a particular revision.

    This is similar to the ``diff`` handler. However, this form features
    a split or side-by-side diff rather than a unified diff.

    The ``context`` query string argument can be used to control the lines of
    context in the diff.

    The ``filecomparison`` template is rendered.
    """
    ctx = webutil.changectx(web.repo, web.req)
    if 'file' not in web.req.qsparams:
        raise ErrorResponse(HTTP_NOT_FOUND, 'file not given')
    path = webutil.cleanpath(web.repo, web.req.qsparams['file'])

    # 'full' means unlimited context; anything else must parse as an int.
    # (was the fragile "cond and a or b" idiom; ternary is equivalent here)
    parsecontext = lambda v: -1 if v == 'full' else int(v)
    if 'context' in web.req.qsparams:
        context = parsecontext(web.req.qsparams['context'])
    else:
        context = parsecontext(web.config('web', 'comparisoncontext', '5'))

    def filelines(f):
        # For binary files, show a one-line placeholder instead of content.
        if f.isbinary():
            mt = mimetypes.guess_type(f.path())[0]
            if not mt:
                mt = 'application/octet-stream'
            return [_('(binary file %s, hash: %s)') % (mt, hex(f.filenode()))]
        return f.data().splitlines()

    fctx = None
    parent = ctx.p1()
    leftrev = parent.rev()
    leftnode = parent.node()
    rightrev = ctx.rev()
    rightnode = ctx.node()
    if path in ctx:
        fctx = ctx[path]
        rightlines = filelines(fctx)
        if path not in parent:
            # file was added in this changeset: nothing on the left
            leftlines = ()
        else:
            leftlines = filelines(parent[path])
    else:
        # file was removed in this changeset: nothing on the right
        rightlines = ()
        # (was ctx.parents()[0][path]; parent is already p1)
        leftlines = filelines(parent[path])

    comparison = webutil.compare(web.tmpl, context, leftlines, rightlines)
    if fctx is not None:
        rename = webutil.renamelink(fctx)
        # render the entry from the file context when we have one
        ctx = fctx
    else:
        # removed file: no rename info, keep the changeset context
        # (the original had a redundant "ctx = ctx" here)
        rename = []

    return web.sendtemplate(
        'filecomparison',
        file=path,
        symrev=webutil.symrevorshortnode(web.req, ctx),
        rename=rename,
        leftrev=leftrev,
        leftnode=hex(leftnode),
        rightrev=rightrev,
        rightnode=hex(rightnode),
        comparison=comparison,
        **pycompat.strkwargs(webutil.commonentry(web.repo, ctx)))
908
909
@webcommand('annotate')
def annotate(web):
    """
    /annotate/{revision}/{path}
    ---------------------------

    Show changeset information for each line in a file.

    The ``ignorews``, ``ignorewsamount``, ``ignorewseol``, and
    ``ignoreblanklines`` query string arguments have the same meaning as
    their ``[annotate]`` config equivalents. It uses the hgrc boolean
    parsing logic to interpret the value. e.g. ``0`` and ``false`` are
    false and ``1`` and ``true`` are true. If not defined, the server
    default settings are used.

    The ``fileannotate`` template is rendered.
    """
    fctx = webutil.filectx(web.repo, web.req)
    f = fctx.path()
    parity = paritygen(web.stripecount)
    ishead = fctx.filerev() in fctx.filelog().headrevs()

    # parents() is called once per line and several lines likely belong to
    # same revision. So it is worth caching.
    # TODO there are still redundant operations within basefilectx.parents()
    # and from the fctx.annotate() call itself that could be cached.
    parentscache = {}
    def lineparents(lfctx):
        rev = lfctx.rev()
        if rev not in parentscache:
            parentscache[rev] = [{'node': p.hex(), 'rev': p.rev()}
                                 for p in lfctx.parents()]
        for p in parentscache[rev]:
            yield p

    def annotategen(**map):
        if fctx.isbinary():
            # binary files get a single synthetic "line" as a placeholder
            mt = (mimetypes.guess_type(fctx.path())[0]
                  or 'application/octet-stream')
            lines = [dagop.annotateline(fctx=fctx.filectx(fctx.filerev()),
                                        lineno=1, text='(binary:%s)' % mt)]
        else:
            lines = webutil.annotate(web.req, fctx, web.repo.ui)

        prevrev = None
        blockparitygen = paritygen(1)
        for lineno, aline in enumerate(lines):
            lfctx = aline.fctx
            rev = lfctx.rev()
            # a "block" is a run of consecutive lines from the same revision
            if rev != prevrev:
                blockhead = True
                blockparity = next(blockparitygen)
            else:
                blockhead = None
            prevrev = rev
            yield {"parity": next(parity),
                   "node": lfctx.hex(),
                   "rev": rev,
                   "author": lfctx.user(),
                   "parents": lineparents(lfctx),
                   "desc": lfctx.description(),
                   "extra": lfctx.extra(),
                   "file": lfctx.path(),
                   "blockhead": blockhead,
                   "blockparity": blockparity,
                   "targetline": aline.lineno,
                   "line": aline.text,
                   "lineno": lineno + 1,
                   "lineid": "l%d" % (lineno + 1),
                   "linenumber": "% 6d" % (lineno + 1),
                   "revdate": lfctx.date()}

    diffopts = webutil.difffeatureopts(web.req, web.repo.ui, 'annotate')
    diffopts = {k: getattr(diffopts, k) for k in diffopts.defaults}

    return web.sendtemplate(
        'fileannotate',
        file=f,
        annotate=annotategen,
        path=webutil.up(f),
        symrev=webutil.symrevorshortnode(web.req, fctx),
        rename=webutil.renamelink(fctx),
        permissions=fctx.manifest().flags(f),
        ishead=int(ishead),
        diffopts=diffopts,
        **pycompat.strkwargs(webutil.commonentry(web.repo, fctx)))
1001
1002
@webcommand('filelog')
def filelog(web):
    """
    /filelog/{revision}/{path}
    --------------------------

    Show information about the history of a file in the repository.

    The ``revcount`` query string argument can be defined to control the
    maximum number of entries to show.

    The ``filelog`` template will be rendered.
    """

    try:
        fctx = webutil.filectx(web.repo, web.req)
        f = fctx.path()
        fl = fctx.filelog()
    except error.LookupError:
        # The file does not exist at the requested revision; find the
        # latest filelog entry linked at or before that revision instead.
        f = webutil.cleanpath(web.repo, web.req.qsparams['file'])
        fl = web.repo.file(f)
        numrevs = len(fl)
        if not numrevs: # file doesn't exist at all
            raise
        rev = webutil.changectx(web.repo, web.req).rev()
        first = fl.linkrev(0)
        if rev < first: # current rev is from before file existed
            raise
        frev = numrevs - 1
        while fl.linkrev(frev) > rev:
            frev -= 1
        fctx = web.repo.filectx(f, fl.linkrev(frev))

    revcount = web.maxshortchanges
    if 'revcount' in web.req.qsparams:
        try:
            revcount = int(web.req.qsparams.get('revcount', revcount))
            revcount = max(revcount, 1)
            web.tmpl.defaults['sessionvars']['revcount'] = revcount
        except ValueError:
            # ignore a malformed revcount and keep the default
            pass

    lrange = webutil.linerange(web.req)

    # "less"/"more" navigation links halve or double the page size
    lessvars = copy.copy(web.tmpl.defaults['sessionvars'])
    lessvars['revcount'] = max(revcount // 2, 1)
    morevars = copy.copy(web.tmpl.defaults['sessionvars'])
    morevars['revcount'] = revcount * 2

    patch = 'patch' in web.req.qsparams
    if patch:
        lessvars['patch'] = morevars['patch'] = web.req.qsparams['patch']
    descend = 'descend' in web.req.qsparams
    if descend:
        lessvars['descend'] = morevars['descend'] = web.req.qsparams['descend']

    count = fctx.filerev() + 1
    start = max(0, count - revcount) # first rev on this page
    end = min(count, start + revcount) # last rev on this page
    parity = paritygen(web.stripecount, offset=start - end)

    repo = web.repo
    filelog = fctx.filelog()
    revs = [filerev for filerev in filelog.revs(start, end - 1)
            if filelog.linkrev(filerev) in repo]
    entries = []

    diffstyle = web.config('web', 'style')
    if 'style' in web.req.qsparams:
        diffstyle = web.req.qsparams['style']

    def makediff(difffctx, linerange=None):
        # diff of one file revision against its changeset's first parent
        diffctx = difffctx.changectx()
        return webutil.diffs(web, diffctx, diffctx.p1(), [difffctx.path()],
                             diffstyle,
                             linerange=linerange,
                             lineidprefix='%s-' % diffctx.hex()[:12])

    linerange = None
    if lrange is not None:
        linerange = webutil.formatlinerange(*lrange)
        # deactivate numeric nav links when linerange is specified as this
        # would required a dedicated "revnav" class
        nav = []
        if descend:
            blockiter = dagop.blockdescendants(fctx, *lrange)
        else:
            blockiter = dagop.blockancestors(fctx, *lrange)
        for i, (c, lr) in enumerate(blockiter, 1):
            diffs = None
            if patch:
                diffs = makediff(c, linerange=lr)
            # follow renames accross filtered (not in range) revisions
            path = c.path()
            entries.append(dict(
                parity=next(parity),
                filerev=c.rev(),
                file=path,
                diff=diffs,
                linerange=webutil.formatlinerange(*lr),
                **pycompat.strkwargs(webutil.commonentry(repo, c))))
            if i == revcount:
                break
        lessvars['linerange'] = webutil.formatlinerange(*lrange)
        morevars['linerange'] = lessvars['linerange']
    else:
        for filerev in revs:
            iterfctx = fctx.filectx(filerev)
            diffs = None
            if patch:
                diffs = makediff(iterfctx)
            entries.append(dict(
                parity=next(parity),
                filerev=filerev,
                file=f,
                diff=diffs,
                rename=webutil.renamelink(iterfctx),
                **pycompat.strkwargs(webutil.commonentry(repo, iterfctx))))
        entries.reverse()
        revnav = webutil.filerevnav(web.repo, fctx.path())
        nav = revnav.gen(end - 1, revcount, count)

    latestentry = entries[:1]

    return web.sendtemplate(
        'filelog',
        file=f,
        nav=nav,
        symrev=webutil.symrevorshortnode(web.req, fctx),
        entries=entries,
        descend=descend,
        patch=patch,
        latestentry=latestentry,
        linerange=linerange,
        revcount=revcount,
        morevars=morevars,
        lessvars=lessvars,
        **pycompat.strkwargs(webutil.commonentry(web.repo, fctx)))
1141
1142
1142 @webcommand('archive')
1143 @webcommand('archive')
1143 def archive(web):
1144 def archive(web):
1144 """
1145 """
1145 /archive/{revision}.{format}[/{path}]
1146 /archive/{revision}.{format}[/{path}]
1146 -------------------------------------
1147 -------------------------------------
1147
1148
1148 Obtain an archive of repository content.
1149 Obtain an archive of repository content.
1149
1150
1150 The content and type of the archive is defined by a URL path parameter.
1151 The content and type of the archive is defined by a URL path parameter.
1151 ``format`` is the file extension of the archive type to be generated. e.g.
1152 ``format`` is the file extension of the archive type to be generated. e.g.
1152 ``zip`` or ``tar.bz2``. Not all archive types may be allowed by your
1153 ``zip`` or ``tar.bz2``. Not all archive types may be allowed by your
1153 server configuration.
1154 server configuration.
1154
1155
1155 The optional ``path`` URL parameter controls content to include in the
1156 The optional ``path`` URL parameter controls content to include in the
1156 archive. If omitted, every file in the specified revision is present in the
1157 archive. If omitted, every file in the specified revision is present in the
1157 archive. If included, only the specified file or contents of the specified
1158 archive. If included, only the specified file or contents of the specified
1158 directory will be included in the archive.
1159 directory will be included in the archive.
1159
1160
1160 No template is used for this handler. Raw, binary content is generated.
1161 No template is used for this handler. Raw, binary content is generated.
1161 """
1162 """
1162
1163
1163 type_ = web.req.qsparams.get('type')
1164 type_ = web.req.qsparams.get('type')
1164 allowed = web.configlist("web", "allow_archive")
1165 allowed = web.configlist("web", "allow_archive")
1165 key = web.req.qsparams['node']
1166 key = web.req.qsparams['node']
1166
1167
1167 if type_ not in webutil.archivespecs:
1168 if type_ not in webutil.archivespecs:
1168 msg = 'Unsupported archive type: %s' % type_
1169 msg = 'Unsupported archive type: %s' % type_
1169 raise ErrorResponse(HTTP_NOT_FOUND, msg)
1170 raise ErrorResponse(HTTP_NOT_FOUND, msg)
1170
1171
1171 if not ((type_ in allowed or
1172 if not ((type_ in allowed or
1172 web.configbool("web", "allow" + type_))):
1173 web.configbool("web", "allow" + type_))):
1173 msg = 'Archive type not allowed: %s' % type_
1174 msg = 'Archive type not allowed: %s' % type_
1174 raise ErrorResponse(HTTP_FORBIDDEN, msg)
1175 raise ErrorResponse(HTTP_FORBIDDEN, msg)
1175
1176
1176 reponame = re.sub(br"\W+", "-", os.path.basename(web.reponame))
1177 reponame = re.sub(br"\W+", "-", os.path.basename(web.reponame))
1177 cnode = web.repo.lookup(key)
1178 cnode = web.repo.lookup(key)
1178 arch_version = key
1179 arch_version = key
1179 if cnode == key or key == 'tip':
1180 if cnode == key or key == 'tip':
1180 arch_version = short(cnode)
1181 arch_version = short(cnode)
1181 name = "%s-%s" % (reponame, arch_version)
1182 name = "%s-%s" % (reponame, arch_version)
1182
1183
1183 ctx = webutil.changectx(web.repo, web.req)
1184 ctx = webutil.changectx(web.repo, web.req)
1184 pats = []
1185 pats = []
1185 match = scmutil.match(ctx, [])
1186 match = scmutil.match(ctx, [])
1186 file = web.req.qsparams.get('file')
1187 file = web.req.qsparams.get('file')
1187 if file:
1188 if file:
1188 pats = ['path:' + file]
1189 pats = ['path:' + file]
1189 match = scmutil.match(ctx, pats, default='path')
1190 match = scmutil.match(ctx, pats, default='path')
1190 if pats:
1191 if pats:
1191 files = [f for f in ctx.manifest().keys() if match(f)]
1192 files = [f for f in ctx.manifest().keys() if match(f)]
1192 if not files:
1193 if not files:
1193 raise ErrorResponse(HTTP_NOT_FOUND,
1194 raise ErrorResponse(HTTP_NOT_FOUND,
1194 'file(s) not found: %s' % file)
1195 'file(s) not found: %s' % file)
1195
1196
1196 mimetype, artype, extension, encoding = webutil.archivespecs[type_]
1197 mimetype, artype, extension, encoding = webutil.archivespecs[type_]
1197
1198
1198 web.res.headers['Content-Type'] = mimetype
1199 web.res.headers['Content-Type'] = mimetype
1199 web.res.headers['Content-Disposition'] = 'attachment; filename=%s%s' % (
1200 web.res.headers['Content-Disposition'] = 'attachment; filename=%s%s' % (
1200 name, extension)
1201 name, extension)
1201
1202
1202 if encoding:
1203 if encoding:
1203 web.res.headers['Content-Encoding'] = encoding
1204 web.res.headers['Content-Encoding'] = encoding
1204
1205
1205 web.res.setbodywillwrite()
1206 web.res.setbodywillwrite()
1206 if list(web.res.sendresponse()):
1207 if list(web.res.sendresponse()):
1207 raise error.ProgrammingError('sendresponse() should not emit data '
1208 raise error.ProgrammingError('sendresponse() should not emit data '
1208 'if writing later')
1209 'if writing later')
1209
1210
1210 bodyfh = web.res.getbodyfile()
1211 bodyfh = web.res.getbodyfile()
1211
1212
1212 archival.archive(web.repo, bodyfh, cnode, artype, prefix=name,
1213 archival.archive(web.repo, bodyfh, cnode, artype, prefix=name,
1213 matchfn=match,
1214 matchfn=match,
1214 subrepos=web.configbool("web", "archivesubrepos"))
1215 subrepos=web.configbool("web", "archivesubrepos"))
1215
1216
1216 return []
1217 return []
1217
1218
1218 @webcommand('static')
1219 @webcommand('static')
1219 def static(web):
1220 def static(web):
1220 fname = web.req.qsparams['file']
1221 fname = web.req.qsparams['file']
1221 # a repo owner may set web.static in .hg/hgrc to get any file
1222 # a repo owner may set web.static in .hg/hgrc to get any file
1222 # readable by the user running the CGI script
1223 # readable by the user running the CGI script
1223 static = web.config("web", "static", None, untrusted=False)
1224 static = web.config("web", "static", None, untrusted=False)
1224 if not static:
1225 if not static:
1225 tp = web.templatepath or templater.templatepaths()
1226 tp = web.templatepath or templater.templatepaths()
1226 if isinstance(tp, str):
1227 if isinstance(tp, str):
1227 tp = [tp]
1228 tp = [tp]
1228 static = [os.path.join(p, 'static') for p in tp]
1229 static = [os.path.join(p, 'static') for p in tp]
1229
1230
1230 staticfile(static, fname, web.res)
1231 staticfile(static, fname, web.res)
1231 return web.res.sendresponse()
1232 return web.res.sendresponse()
1232
1233
1233 @webcommand('graph')
1234 @webcommand('graph')
1234 def graph(web):
1235 def graph(web):
1235 """
1236 """
1236 /graph[/{revision}]
1237 /graph[/{revision}]
1237 -------------------
1238 -------------------
1238
1239
1239 Show information about the graphical topology of the repository.
1240 Show information about the graphical topology of the repository.
1240
1241
1241 Information rendered by this handler can be used to create visual
1242 Information rendered by this handler can be used to create visual
1242 representations of repository topology.
1243 representations of repository topology.
1243
1244
1244 The ``revision`` URL parameter controls the starting changeset. If it's
1245 The ``revision`` URL parameter controls the starting changeset. If it's
1245 absent, the default is ``tip``.
1246 absent, the default is ``tip``.
1246
1247
1247 The ``revcount`` query string argument can define the number of changesets
1248 The ``revcount`` query string argument can define the number of changesets
1248 to show information for.
1249 to show information for.
1249
1250
1250 The ``graphtop`` query string argument can specify the starting changeset
1251 The ``graphtop`` query string argument can specify the starting changeset
1251 for producing ``jsdata`` variable that is used for rendering graph in
1252 for producing ``jsdata`` variable that is used for rendering graph in
1252 JavaScript. By default it has the same value as ``revision``.
1253 JavaScript. By default it has the same value as ``revision``.
1253
1254
1254 This handler will render the ``graph`` template.
1255 This handler will render the ``graph`` template.
1255 """
1256 """
1256
1257
1257 if 'node' in web.req.qsparams:
1258 if 'node' in web.req.qsparams:
1258 ctx = webutil.changectx(web.repo, web.req)
1259 ctx = webutil.changectx(web.repo, web.req)
1259 symrev = webutil.symrevorshortnode(web.req, ctx)
1260 symrev = webutil.symrevorshortnode(web.req, ctx)
1260 else:
1261 else:
1261 ctx = web.repo['tip']
1262 ctx = web.repo['tip']
1262 symrev = 'tip'
1263 symrev = 'tip'
1263 rev = ctx.rev()
1264 rev = ctx.rev()
1264
1265
1265 bg_height = 39
1266 bg_height = 39
1266 revcount = web.maxshortchanges
1267 revcount = web.maxshortchanges
1267 if 'revcount' in web.req.qsparams:
1268 if 'revcount' in web.req.qsparams:
1268 try:
1269 try:
1269 revcount = int(web.req.qsparams.get('revcount', revcount))
1270 revcount = int(web.req.qsparams.get('revcount', revcount))
1270 revcount = max(revcount, 1)
1271 revcount = max(revcount, 1)
1271 web.tmpl.defaults['sessionvars']['revcount'] = revcount
1272 web.tmpl.defaults['sessionvars']['revcount'] = revcount
1272 except ValueError:
1273 except ValueError:
1273 pass
1274 pass
1274
1275
1275 lessvars = copy.copy(web.tmpl.defaults['sessionvars'])
1276 lessvars = copy.copy(web.tmpl.defaults['sessionvars'])
1276 lessvars['revcount'] = max(revcount // 2, 1)
1277 lessvars['revcount'] = max(revcount // 2, 1)
1277 morevars = copy.copy(web.tmpl.defaults['sessionvars'])
1278 morevars = copy.copy(web.tmpl.defaults['sessionvars'])
1278 morevars['revcount'] = revcount * 2
1279 morevars['revcount'] = revcount * 2
1279
1280
1280 graphtop = web.req.qsparams.get('graphtop', ctx.hex())
1281 graphtop = web.req.qsparams.get('graphtop', ctx.hex())
1281 graphvars = copy.copy(web.tmpl.defaults['sessionvars'])
1282 graphvars = copy.copy(web.tmpl.defaults['sessionvars'])
1282 graphvars['graphtop'] = graphtop
1283 graphvars['graphtop'] = graphtop
1283
1284
1284 count = len(web.repo)
1285 count = len(web.repo)
1285 pos = rev
1286 pos = rev
1286
1287
1287 uprev = min(max(0, count - 1), rev + revcount)
1288 uprev = min(max(0, count - 1), rev + revcount)
1288 downrev = max(0, rev - revcount)
1289 downrev = max(0, rev - revcount)
1289 changenav = webutil.revnav(web.repo).gen(pos, revcount, count)
1290 changenav = webutil.revnav(web.repo).gen(pos, revcount, count)
1290
1291
1291 tree = []
1292 tree = []
1292 nextentry = []
1293 nextentry = []
1293 lastrev = 0
1294 lastrev = 0
1294 if pos != -1:
1295 if pos != -1:
1295 allrevs = web.repo.changelog.revs(pos, 0)
1296 allrevs = web.repo.changelog.revs(pos, 0)
1296 revs = []
1297 revs = []
1297 for i in allrevs:
1298 for i in allrevs:
1298 revs.append(i)
1299 revs.append(i)
1299 if len(revs) >= revcount + 1:
1300 if len(revs) >= revcount + 1:
1300 break
1301 break
1301
1302
1302 if len(revs) > revcount:
1303 if len(revs) > revcount:
1303 nextentry = [webutil.commonentry(web.repo, web.repo[revs[-1]])]
1304 nextentry = [webutil.commonentry(web.repo, web.repo[revs[-1]])]
1304 revs = revs[:-1]
1305 revs = revs[:-1]
1305
1306
1306 lastrev = revs[-1]
1307 lastrev = revs[-1]
1307
1308
1308 # We have to feed a baseset to dagwalker as it is expecting smartset
1309 # We have to feed a baseset to dagwalker as it is expecting smartset
1309 # object. This does not have a big impact on hgweb performance itself
1310 # object. This does not have a big impact on hgweb performance itself
1310 # since hgweb graphing code is not itself lazy yet.
1311 # since hgweb graphing code is not itself lazy yet.
1311 dag = graphmod.dagwalker(web.repo, smartset.baseset(revs))
1312 dag = graphmod.dagwalker(web.repo, smartset.baseset(revs))
1312 # As we said one line above... not lazy.
1313 # As we said one line above... not lazy.
1313 tree = list(item for item in graphmod.colored(dag, web.repo)
1314 tree = list(item for item in graphmod.colored(dag, web.repo)
1314 if item[1] == graphmod.CHANGESET)
1315 if item[1] == graphmod.CHANGESET)
1315
1316
1316 def nodecurrent(ctx):
1317 def nodecurrent(ctx):
1317 wpnodes = web.repo.dirstate.parents()
1318 wpnodes = web.repo.dirstate.parents()
1318 if wpnodes[1] == nullid:
1319 if wpnodes[1] == nullid:
1319 wpnodes = wpnodes[:1]
1320 wpnodes = wpnodes[:1]
1320 if ctx.node() in wpnodes:
1321 if ctx.node() in wpnodes:
1321 return '@'
1322 return '@'
1322 return ''
1323 return ''
1323
1324
1324 def nodesymbol(ctx):
1325 def nodesymbol(ctx):
1325 if ctx.obsolete():
1326 if ctx.obsolete():
1326 return 'x'
1327 return 'x'
1327 elif ctx.isunstable():
1328 elif ctx.isunstable():
1328 return '*'
1329 return '*'
1329 elif ctx.closesbranch():
1330 elif ctx.closesbranch():
1330 return '_'
1331 return '_'
1331 else:
1332 else:
1332 return 'o'
1333 return 'o'
1333
1334
1334 def fulltree():
1335 def fulltree():
1335 pos = web.repo[graphtop].rev()
1336 pos = web.repo[graphtop].rev()
1336 tree = []
1337 tree = []
1337 if pos != -1:
1338 if pos != -1:
1338 revs = web.repo.changelog.revs(pos, lastrev)
1339 revs = web.repo.changelog.revs(pos, lastrev)
1339 dag = graphmod.dagwalker(web.repo, smartset.baseset(revs))
1340 dag = graphmod.dagwalker(web.repo, smartset.baseset(revs))
1340 tree = list(item for item in graphmod.colored(dag, web.repo)
1341 tree = list(item for item in graphmod.colored(dag, web.repo)
1341 if item[1] == graphmod.CHANGESET)
1342 if item[1] == graphmod.CHANGESET)
1342 return tree
1343 return tree
1343
1344
1344 def jsdata():
1345 def jsdata():
1345 return [{'node': pycompat.bytestr(ctx),
1346 return [{'node': pycompat.bytestr(ctx),
1346 'graphnode': nodecurrent(ctx) + nodesymbol(ctx),
1347 'graphnode': nodecurrent(ctx) + nodesymbol(ctx),
1347 'vertex': vtx,
1348 'vertex': vtx,
1348 'edges': edges}
1349 'edges': edges}
1349 for (id, type, ctx, vtx, edges) in fulltree()]
1350 for (id, type, ctx, vtx, edges) in fulltree()]
1350
1351
1351 def nodes():
1352 def nodes():
1352 parity = paritygen(web.stripecount)
1353 parity = paritygen(web.stripecount)
1353 for row, (id, type, ctx, vtx, edges) in enumerate(tree):
1354 for row, (id, type, ctx, vtx, edges) in enumerate(tree):
1354 entry = webutil.commonentry(web.repo, ctx)
1355 entry = webutil.commonentry(web.repo, ctx)
1355 edgedata = [{'col': edge[0],
1356 edgedata = [{'col': edge[0],
1356 'nextcol': edge[1],
1357 'nextcol': edge[1],
1357 'color': (edge[2] - 1) % 6 + 1,
1358 'color': (edge[2] - 1) % 6 + 1,
1358 'width': edge[3],
1359 'width': edge[3],
1359 'bcolor': edge[4]}
1360 'bcolor': edge[4]}
1360 for edge in edges]
1361 for edge in edges]
1361
1362
1362 entry.update({'col': vtx[0],
1363 entry.update({'col': vtx[0],
1363 'color': (vtx[1] - 1) % 6 + 1,
1364 'color': (vtx[1] - 1) % 6 + 1,
1364 'parity': next(parity),
1365 'parity': next(parity),
1365 'edges': edgedata,
1366 'edges': edgedata,
1366 'row': row,
1367 'row': row,
1367 'nextrow': row + 1})
1368 'nextrow': row + 1})
1368
1369
1369 yield entry
1370 yield entry
1370
1371
1371 rows = len(tree)
1372 rows = len(tree)
1372
1373
1373 return web.sendtemplate(
1374 return web.sendtemplate(
1374 'graph',
1375 'graph',
1375 rev=rev,
1376 rev=rev,
1376 symrev=symrev,
1377 symrev=symrev,
1377 revcount=revcount,
1378 revcount=revcount,
1378 uprev=uprev,
1379 uprev=uprev,
1379 lessvars=lessvars,
1380 lessvars=lessvars,
1380 morevars=morevars,
1381 morevars=morevars,
1381 downrev=downrev,
1382 downrev=downrev,
1382 graphvars=graphvars,
1383 graphvars=graphvars,
1383 rows=rows,
1384 rows=rows,
1384 bg_height=bg_height,
1385 bg_height=bg_height,
1385 changesets=count,
1386 changesets=count,
1386 nextentry=nextentry,
1387 nextentry=nextentry,
1387 jsdata=lambda **x: jsdata(),
1388 jsdata=lambda **x: jsdata(),
1388 nodes=lambda **x: nodes(),
1389 nodes=lambda **x: nodes(),
1389 node=ctx.hex(),
1390 node=ctx.hex(),
1390 changenav=changenav)
1391 changenav=changenav)
1391
1392
1392 def _getdoc(e):
1393 def _getdoc(e):
1393 doc = e[0].__doc__
1394 doc = e[0].__doc__
1394 if doc:
1395 if doc:
1395 doc = _(doc).partition('\n')[0]
1396 doc = _(doc).partition('\n')[0]
1396 else:
1397 else:
1397 doc = _('(no help text available)')
1398 doc = _('(no help text available)')
1398 return doc
1399 return doc
1399
1400
1400 @webcommand('help')
1401 @webcommand('help')
1401 def help(web):
1402 def help(web):
1402 """
1403 """
1403 /help[/{topic}]
1404 /help[/{topic}]
1404 ---------------
1405 ---------------
1405
1406
1406 Render help documentation.
1407 Render help documentation.
1407
1408
1408 This web command is roughly equivalent to :hg:`help`. If a ``topic``
1409 This web command is roughly equivalent to :hg:`help`. If a ``topic``
1409 is defined, that help topic will be rendered. If not, an index of
1410 is defined, that help topic will be rendered. If not, an index of
1410 available help topics will be rendered.
1411 available help topics will be rendered.
1411
1412
1412 The ``help`` template will be rendered when requesting help for a topic.
1413 The ``help`` template will be rendered when requesting help for a topic.
1413 ``helptopics`` will be rendered for the index of help topics.
1414 ``helptopics`` will be rendered for the index of help topics.
1414 """
1415 """
1415 from .. import commands, help as helpmod # avoid cycle
1416 from .. import commands, help as helpmod # avoid cycle
1416
1417
1417 topicname = web.req.qsparams.get('node')
1418 topicname = web.req.qsparams.get('node')
1418 if not topicname:
1419 if not topicname:
1419 def topics(**map):
1420 def topics(**map):
1420 for entries, summary, _doc in helpmod.helptable:
1421 for entries, summary, _doc in helpmod.helptable:
1421 yield {'topic': entries[0], 'summary': summary}
1422 yield {'topic': entries[0], 'summary': summary}
1422
1423
1423 early, other = [], []
1424 early, other = [], []
1424 primary = lambda s: s.partition('|')[0]
1425 primary = lambda s: s.partition('|')[0]
1425 for c, e in commands.table.iteritems():
1426 for c, e in commands.table.iteritems():
1426 doc = _getdoc(e)
1427 doc = _getdoc(e)
1427 if 'DEPRECATED' in doc or c.startswith('debug'):
1428 if 'DEPRECATED' in doc or c.startswith('debug'):
1428 continue
1429 continue
1429 cmd = primary(c)
1430 cmd = primary(c)
1430 if cmd.startswith('^'):
1431 if cmd.startswith('^'):
1431 early.append((cmd[1:], doc))
1432 early.append((cmd[1:], doc))
1432 else:
1433 else:
1433 other.append((cmd, doc))
1434 other.append((cmd, doc))
1434
1435
1435 early.sort()
1436 early.sort()
1436 other.sort()
1437 other.sort()
1437
1438
1438 def earlycommands(**map):
1439 def earlycommands(**map):
1439 for c, doc in early:
1440 for c, doc in early:
1440 yield {'topic': c, 'summary': doc}
1441 yield {'topic': c, 'summary': doc}
1441
1442
1442 def othercommands(**map):
1443 def othercommands(**map):
1443 for c, doc in other:
1444 for c, doc in other:
1444 yield {'topic': c, 'summary': doc}
1445 yield {'topic': c, 'summary': doc}
1445
1446
1446 return web.sendtemplate(
1447 return web.sendtemplate(
1447 'helptopics',
1448 'helptopics',
1448 topics=topics,
1449 topics=topics,
1449 earlycommands=earlycommands,
1450 earlycommands=earlycommands,
1450 othercommands=othercommands,
1451 othercommands=othercommands,
1451 title='Index')
1452 title='Index')
1452
1453
1453 # Render an index of sub-topics.
1454 # Render an index of sub-topics.
1454 if topicname in helpmod.subtopics:
1455 if topicname in helpmod.subtopics:
1455 topics = []
1456 topics = []
1456 for entries, summary, _doc in helpmod.subtopics[topicname]:
1457 for entries, summary, _doc in helpmod.subtopics[topicname]:
1457 topics.append({
1458 topics.append({
1458 'topic': '%s.%s' % (topicname, entries[0]),
1459 'topic': '%s.%s' % (topicname, entries[0]),
1459 'basename': entries[0],
1460 'basename': entries[0],
1460 'summary': summary,
1461 'summary': summary,
1461 })
1462 })
1462
1463
1463 return web.sendtemplate(
1464 return web.sendtemplate(
1464 'helptopics',
1465 'helptopics',
1465 topics=topics,
1466 topics=topics,
1466 title=topicname,
1467 title=topicname,
1467 subindex=True)
1468 subindex=True)
1468
1469
1469 u = webutil.wsgiui.load()
1470 u = webutil.wsgiui.load()
1470 u.verbose = True
1471 u.verbose = True
1471
1472
1472 # Render a page from a sub-topic.
1473 # Render a page from a sub-topic.
1473 if '.' in topicname:
1474 if '.' in topicname:
1474 # TODO implement support for rendering sections, like
1475 # TODO implement support for rendering sections, like
1475 # `hg help` works.
1476 # `hg help` works.
1476 topic, subtopic = topicname.split('.', 1)
1477 topic, subtopic = topicname.split('.', 1)
1477 if topic not in helpmod.subtopics:
1478 if topic not in helpmod.subtopics:
1478 raise ErrorResponse(HTTP_NOT_FOUND)
1479 raise ErrorResponse(HTTP_NOT_FOUND)
1479 else:
1480 else:
1480 topic = topicname
1481 topic = topicname
1481 subtopic = None
1482 subtopic = None
1482
1483
1483 try:
1484 try:
1484 doc = helpmod.help_(u, commands, topic, subtopic=subtopic)
1485 doc = helpmod.help_(u, commands, topic, subtopic=subtopic)
1485 except error.Abort:
1486 except error.Abort:
1486 raise ErrorResponse(HTTP_NOT_FOUND)
1487 raise ErrorResponse(HTTP_NOT_FOUND)
1487
1488
1488 return web.sendtemplate(
1489 return web.sendtemplate(
1489 'help',
1490 'help',
1490 topic=topicname,
1491 topic=topicname,
1491 doc=doc)
1492 doc=doc)
1492
1493
1493 # tell hggettext to extract docstrings from these functions:
1494 # tell hggettext to extract docstrings from these functions:
1494 i18nfunctions = commands.values()
1495 i18nfunctions = commands.values()
@@ -1,2379 +1,2380 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import os
12 import os
13 import random
13 import random
14 import sys
14 import sys
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 hex,
20 hex,
21 nullid,
21 nullid,
22 short,
22 short,
23 )
23 )
24 from .thirdparty.zope import (
24 from .thirdparty.zope import (
25 interface as zi,
25 interface as zi,
26 )
26 )
27 from . import (
27 from . import (
28 bookmarks,
28 bookmarks,
29 branchmap,
29 branchmap,
30 bundle2,
30 bundle2,
31 changegroup,
31 changegroup,
32 changelog,
32 changelog,
33 color,
33 color,
34 context,
34 context,
35 dirstate,
35 dirstate,
36 dirstateguard,
36 dirstateguard,
37 discovery,
37 discovery,
38 encoding,
38 encoding,
39 error,
39 error,
40 exchange,
40 exchange,
41 extensions,
41 extensions,
42 filelog,
42 filelog,
43 hook,
43 hook,
44 lock as lockmod,
44 lock as lockmod,
45 manifest,
45 manifest,
46 match as matchmod,
46 match as matchmod,
47 merge as mergemod,
47 merge as mergemod,
48 mergeutil,
48 mergeutil,
49 namespaces,
49 namespaces,
50 narrowspec,
50 narrowspec,
51 obsolete,
51 obsolete,
52 pathutil,
52 pathutil,
53 phases,
53 phases,
54 pushkey,
54 pushkey,
55 pycompat,
55 pycompat,
56 repository,
56 repository,
57 repoview,
57 repoview,
58 revset,
58 revset,
59 revsetlang,
59 revsetlang,
60 scmutil,
60 scmutil,
61 sparse,
61 sparse,
62 store,
62 store,
63 subrepoutil,
63 subrepoutil,
64 tags as tagsmod,
64 tags as tagsmod,
65 transaction,
65 transaction,
66 txnutil,
66 txnutil,
67 util,
67 util,
68 vfs as vfsmod,
68 vfs as vfsmod,
69 )
69 )
70 from .utils import (
70 from .utils import (
71 procutil,
71 procutil,
72 stringutil,
72 stringutil,
73 )
73 )
74
74
75 release = lockmod.release
75 release = lockmod.release
76 urlerr = util.urlerr
76 urlerr = util.urlerr
77 urlreq = util.urlreq
77 urlreq = util.urlreq
78
78
79 # set of (path, vfs-location) tuples. vfs-location is:
79 # set of (path, vfs-location) tuples. vfs-location is:
80 # - 'plain for vfs relative paths
80 # - 'plain for vfs relative paths
81 # - '' for svfs relative paths
81 # - '' for svfs relative paths
82 _cachedfiles = set()
82 _cachedfiles = set()
83
83
84 class _basefilecache(scmutil.filecache):
84 class _basefilecache(scmutil.filecache):
85 """All filecache usage on repo are done for logic that should be unfiltered
85 """All filecache usage on repo are done for logic that should be unfiltered
86 """
86 """
87 def __get__(self, repo, type=None):
87 def __get__(self, repo, type=None):
88 if repo is None:
88 if repo is None:
89 return self
89 return self
90 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
90 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
91 def __set__(self, repo, value):
91 def __set__(self, repo, value):
92 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
92 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
93 def __delete__(self, repo):
93 def __delete__(self, repo):
94 return super(_basefilecache, self).__delete__(repo.unfiltered())
94 return super(_basefilecache, self).__delete__(repo.unfiltered())
95
95
96 class repofilecache(_basefilecache):
96 class repofilecache(_basefilecache):
97 """filecache for files in .hg but outside of .hg/store"""
97 """filecache for files in .hg but outside of .hg/store"""
98 def __init__(self, *paths):
98 def __init__(self, *paths):
99 super(repofilecache, self).__init__(*paths)
99 super(repofilecache, self).__init__(*paths)
100 for path in paths:
100 for path in paths:
101 _cachedfiles.add((path, 'plain'))
101 _cachedfiles.add((path, 'plain'))
102
102
103 def join(self, obj, fname):
103 def join(self, obj, fname):
104 return obj.vfs.join(fname)
104 return obj.vfs.join(fname)
105
105
106 class storecache(_basefilecache):
106 class storecache(_basefilecache):
107 """filecache for files in the store"""
107 """filecache for files in the store"""
108 def __init__(self, *paths):
108 def __init__(self, *paths):
109 super(storecache, self).__init__(*paths)
109 super(storecache, self).__init__(*paths)
110 for path in paths:
110 for path in paths:
111 _cachedfiles.add((path, ''))
111 _cachedfiles.add((path, ''))
112
112
113 def join(self, obj, fname):
113 def join(self, obj, fname):
114 return obj.sjoin(fname)
114 return obj.sjoin(fname)
115
115
116 def isfilecached(repo, name):
116 def isfilecached(repo, name):
117 """check if a repo has already cached "name" filecache-ed property
117 """check if a repo has already cached "name" filecache-ed property
118
118
119 This returns (cachedobj-or-None, iscached) tuple.
119 This returns (cachedobj-or-None, iscached) tuple.
120 """
120 """
121 cacheentry = repo.unfiltered()._filecache.get(name, None)
121 cacheentry = repo.unfiltered()._filecache.get(name, None)
122 if not cacheentry:
122 if not cacheentry:
123 return None, False
123 return None, False
124 return cacheentry.obj, True
124 return cacheentry.obj, True
125
125
126 class unfilteredpropertycache(util.propertycache):
126 class unfilteredpropertycache(util.propertycache):
127 """propertycache that apply to unfiltered repo only"""
127 """propertycache that apply to unfiltered repo only"""
128
128
129 def __get__(self, repo, type=None):
129 def __get__(self, repo, type=None):
130 unfi = repo.unfiltered()
130 unfi = repo.unfiltered()
131 if unfi is repo:
131 if unfi is repo:
132 return super(unfilteredpropertycache, self).__get__(unfi)
132 return super(unfilteredpropertycache, self).__get__(unfi)
133 return getattr(unfi, self.name)
133 return getattr(unfi, self.name)
134
134
135 class filteredpropertycache(util.propertycache):
135 class filteredpropertycache(util.propertycache):
136 """propertycache that must take filtering in account"""
136 """propertycache that must take filtering in account"""
137
137
138 def cachevalue(self, obj, value):
138 def cachevalue(self, obj, value):
139 object.__setattr__(obj, self.name, value)
139 object.__setattr__(obj, self.name, value)
140
140
141
141
142 def hasunfilteredcache(repo, name):
142 def hasunfilteredcache(repo, name):
143 """check if a repo has an unfilteredpropertycache value for <name>"""
143 """check if a repo has an unfilteredpropertycache value for <name>"""
144 return name in vars(repo.unfiltered())
144 return name in vars(repo.unfiltered())
145
145
146 def unfilteredmethod(orig):
146 def unfilteredmethod(orig):
147 """decorate method that always need to be run on unfiltered version"""
147 """decorate method that always need to be run on unfiltered version"""
148 def wrapper(repo, *args, **kwargs):
148 def wrapper(repo, *args, **kwargs):
149 return orig(repo.unfiltered(), *args, **kwargs)
149 return orig(repo.unfiltered(), *args, **kwargs)
150 return wrapper
150 return wrapper
151
151
152 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
152 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
153 'unbundle'}
153 'unbundle'}
154 legacycaps = moderncaps.union({'changegroupsubset'})
154 legacycaps = moderncaps.union({'changegroupsubset'})
155
155
156 @zi.implementer(repository.ipeercommandexecutor)
156 @zi.implementer(repository.ipeercommandexecutor)
157 class localcommandexecutor(object):
157 class localcommandexecutor(object):
158 def __init__(self, peer):
158 def __init__(self, peer):
159 self._peer = peer
159 self._peer = peer
160 self._sent = False
160 self._sent = False
161 self._closed = False
161 self._closed = False
162
162
163 def __enter__(self):
163 def __enter__(self):
164 return self
164 return self
165
165
166 def __exit__(self, exctype, excvalue, exctb):
166 def __exit__(self, exctype, excvalue, exctb):
167 self.close()
167 self.close()
168
168
169 def callcommand(self, command, args):
169 def callcommand(self, command, args):
170 if self._sent:
170 if self._sent:
171 raise error.ProgrammingError('callcommand() cannot be used after '
171 raise error.ProgrammingError('callcommand() cannot be used after '
172 'sendcommands()')
172 'sendcommands()')
173
173
174 if self._closed:
174 if self._closed:
175 raise error.ProgrammingError('callcommand() cannot be used after '
175 raise error.ProgrammingError('callcommand() cannot be used after '
176 'close()')
176 'close()')
177
177
178 # We don't need to support anything fancy. Just call the named
178 # We don't need to support anything fancy. Just call the named
179 # method on the peer and return a resolved future.
179 # method on the peer and return a resolved future.
180 fn = getattr(self._peer, pycompat.sysstr(command))
180 fn = getattr(self._peer, pycompat.sysstr(command))
181
181
182 f = pycompat.futures.Future()
182 f = pycompat.futures.Future()
183
183
184 try:
184 try:
185 result = fn(**pycompat.strkwargs(args))
185 result = fn(**pycompat.strkwargs(args))
186 except Exception:
186 except Exception:
187 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
187 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
188 else:
188 else:
189 f.set_result(result)
189 f.set_result(result)
190
190
191 return f
191 return f
192
192
193 def sendcommands(self):
193 def sendcommands(self):
194 self._sent = True
194 self._sent = True
195
195
196 def close(self):
196 def close(self):
197 self._closed = True
197 self._closed = True
198
198
@zi.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        # Expose only the 'served' filtered view so that hidden
        # changesets stay invisible through the peer interface.
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        # Local peers expose the underlying repository directly.
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        """Return an unbundler for the requested bundle data.

        Returns a bundle2 unbundler when bundle2 was requested via
        ``bundlecaps``; otherwise a version-01 changegroup unbundler.
        """
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        # Stream clones only make sense over a network transport.
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            # Translate a push race into a wire-protocol-style error
            # response, mirroring what a remote peer would produce.
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.
326
326
@zi.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        """Delegate the legacy 'between' command to the local repo."""
        return self._repo.between(pairs)

    def branches(self, nodes):
        """Delegate the legacy 'branches' command to the local repo."""
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        """Build a version-01 changegroup from ``nodes`` up to all heads."""
        repo = self._repo
        outgoing = discovery.outgoing(repo, missingroots=nodes,
                                      missingheads=repo.heads())
        return changegroup.makechangegroup(repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        """Build a version-01 changegroup between ``bases`` and ``heads``."""
        repo = self._repo
        outgoing = discovery.outgoing(repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.
354
354
# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle that requirement.
featuresetupfuncs = set()
367
367
@zi.implementer(repository.completelocalrepository)
class localrepository(object):

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.

    # Requirements affecting the on-disk storage format; advertised in
    # .hg/requires so that incompatible clients refuse to open the repo.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        REVLOGV2_REQUIREMENT,
    }
    # Full set of requirements this class can open.
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
    }
    # Requirements mirrored into svfs.options by _applyopenerreqs().
    openerreqs = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
    }

    # list of prefix for file which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assume
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't be dirstate covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }
415
415
    def __init__(self, baseui, path, create=False):
        """Open (or, with create=True, initialize) the repository at path."""
        self.requirements = set()
        self.filtername = None
        # wvfs: rooted at the repository root, used to access the working copy
        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
        # vfs: rooted at .hg, used to access repo files outside of .hg/store
        self.vfs = None
        # svfs: usually rooted at .hg/store, used to access repository history
        # If this is a shared repository, this vfs may point to another
        # repository's .hg/store directory.
        self.svfs = None
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        self.auditor = pathutil.pathauditor(
            self.root, callback=self._checknested)
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        self.nofsauditor = pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True)
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            # Wrap vfs.audit so writes without the proper lock emit
            # developer warnings (see _getvfsward).
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.vfs.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            # A missing .hg/hgrc is not an error.
            pass

        if featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            # Only run feature-setup hooks registered by extensions that
            # are actually loaded for this repository.
            for setupfunc in featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                # Old repos may have no requires file at all.
                if inst.errno != errno.ENOENT:
                    raise

        cachepath = self.vfs.join('cache')
        self.sharedpath = self.path
        try:
            # If this is a shared repo, .hg/sharedpath names the real
            # store location; caches live alongside that store.
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                sharedpath = self.vfs.join(sharedpath)
            vfs = vfsmod.vfs(sharedpath, realpath=True)
            cachepath = vfs.join('cache')
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        if 'exp-sparse' in self.requirements and not sparse.enabled:
            raise error.RepoError(_('repository is using sparse feature but '
                                    'sparse is not enabled; enable the '
                                    '"sparse" extensions to access'))

        self.store = store.store(
            self.requirements, self.sharedpath,
            lambda base: vfsmod.vfs(base, cacheaudited=True))
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
        self.cachevfs.createmode = self.store.createmode
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}
575
575
576 def _getvfsward(self, origfunc):
576 def _getvfsward(self, origfunc):
577 """build a ward for self.vfs"""
577 """build a ward for self.vfs"""
578 rref = weakref.ref(self)
578 rref = weakref.ref(self)
579 def checkvfs(path, mode=None):
579 def checkvfs(path, mode=None):
580 ret = origfunc(path, mode=mode)
580 ret = origfunc(path, mode=mode)
581 repo = rref()
581 repo = rref()
582 if (repo is None
582 if (repo is None
583 or not util.safehasattr(repo, '_wlockref')
583 or not util.safehasattr(repo, '_wlockref')
584 or not util.safehasattr(repo, '_lockref')):
584 or not util.safehasattr(repo, '_lockref')):
585 return
585 return
586 if mode in (None, 'r', 'rb'):
586 if mode in (None, 'r', 'rb'):
587 return
587 return
588 if path.startswith(repo.path):
588 if path.startswith(repo.path):
589 # truncate name relative to the repository (.hg)
589 # truncate name relative to the repository (.hg)
590 path = path[len(repo.path) + 1:]
590 path = path[len(repo.path) + 1:]
591 if path.startswith('cache/'):
591 if path.startswith('cache/'):
592 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
592 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
593 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
593 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
594 if path.startswith('journal.'):
594 if path.startswith('journal.'):
595 # journal is covered by 'lock'
595 # journal is covered by 'lock'
596 if repo._currentlock(repo._lockref) is None:
596 if repo._currentlock(repo._lockref) is None:
597 repo.ui.develwarn('write with no lock: "%s"' % path,
597 repo.ui.develwarn('write with no lock: "%s"' % path,
598 stacklevel=2, config='check-locks')
598 stacklevel=2, config='check-locks')
599 elif repo._currentlock(repo._wlockref) is None:
599 elif repo._currentlock(repo._wlockref) is None:
600 # rest of vfs files are covered by 'wlock'
600 # rest of vfs files are covered by 'wlock'
601 #
601 #
602 # exclude special files
602 # exclude special files
603 for prefix in self._wlockfreeprefix:
603 for prefix in self._wlockfreeprefix:
604 if path.startswith(prefix):
604 if path.startswith(prefix):
605 return
605 return
606 repo.ui.develwarn('write with no wlock: "%s"' % path,
606 repo.ui.develwarn('write with no wlock: "%s"' % path,
607 stacklevel=2, config='check-locks')
607 stacklevel=2, config='check-locks')
608 return ret
608 return ret
609 return checkvfs
609 return checkvfs
610
610
611 def _getsvfsward(self, origfunc):
611 def _getsvfsward(self, origfunc):
612 """build a ward for self.svfs"""
612 """build a ward for self.svfs"""
613 rref = weakref.ref(self)
613 rref = weakref.ref(self)
614 def checksvfs(path, mode=None):
614 def checksvfs(path, mode=None):
615 ret = origfunc(path, mode=mode)
615 ret = origfunc(path, mode=mode)
616 repo = rref()
616 repo = rref()
617 if repo is None or not util.safehasattr(repo, '_lockref'):
617 if repo is None or not util.safehasattr(repo, '_lockref'):
618 return
618 return
619 if mode in (None, 'r', 'rb'):
619 if mode in (None, 'r', 'rb'):
620 return
620 return
621 if path.startswith(repo.sharedpath):
621 if path.startswith(repo.sharedpath):
622 # truncate name relative to the repository (.hg)
622 # truncate name relative to the repository (.hg)
623 path = path[len(repo.sharedpath) + 1:]
623 path = path[len(repo.sharedpath) + 1:]
624 if repo._currentlock(repo._lockref) is None:
624 if repo._currentlock(repo._lockref) is None:
625 repo.ui.develwarn('write with no lock: "%s"' % path,
625 repo.ui.develwarn('write with no lock: "%s"' % path,
626 stacklevel=3)
626 stacklevel=3)
627 return ret
627 return ret
628 return checksvfs
628 return checksvfs
629
629
    def close(self):
        # Flush lazily-written caches (currently the rev branch cache)
        # before the repository object goes away.
        self._writecaches()
632
632
    def _loadextensions(self):
        # Load all extensions enabled in the (possibly repo-local) config.
        extensions.loadall(self.ui)
635
635
636 def _writecaches(self):
636 def _writecaches(self):
637 if self._revbranchcache:
637 if self._revbranchcache:
638 self._revbranchcache.write()
638 self._revbranchcache.write()
639
639
640 def _restrictcapabilities(self, caps):
640 def _restrictcapabilities(self, caps):
641 if self.ui.configbool('experimental', 'bundle2-advertise'):
641 if self.ui.configbool('experimental', 'bundle2-advertise'):
642 caps = set(caps)
642 caps = set(caps)
643 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
643 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
644 role='client'))
644 role='client'))
645 caps.add('bundle2=' + urlreq.quote(capsblob))
645 caps.add('bundle2=' + urlreq.quote(capsblob))
646 return caps
646 return caps
647
647
    def _applyopenerreqs(self):
        """Derive self.svfs.options from requirements and config."""
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas')
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
        chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
        # configbytes may yield a negative value meaning "unset"; only a
        # non-negative span is applied.
        if 0 <= chainspan:
            self.svfs.options['maxdeltachainspan'] = chainspan
        mmapindexthreshold = self.ui.configbytes('experimental',
                                                 'mmapindexthreshold')
        if mmapindexthreshold is not None:
            self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
        withsparseread = self.ui.configbool('experimental', 'sparse-read')
        srdensitythres = float(self.ui.config('experimental',
                                              'sparse-read.density-threshold'))
        srmingapsize = self.ui.configbytes('experimental',
                                           'sparse-read.min-gap-size')
        self.svfs.options['with-sparse-read'] = withsparseread
        self.svfs.options['sparse-read-density-threshold'] = srdensitythres
        self.svfs.options['sparse-read-min-gap-size'] = srmingapsize

        # Map an 'exp-compression-<name>' requirement to the engine name.
        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

        # TODO move "revlogv2" to openerreqs once finalized.
        if REVLOGV2_REQUIREMENT in self.requirements:
            self.svfs.options['revlogv2'] = True
691
691
692 def _writerequirements(self):
692 def _writerequirements(self):
693 scmutil.writerequires(self.vfs, self.requirements)
693 scmutil.writerequires(self.vfs, self.requirements)
694
694
695 def _checknested(self, path):
695 def _checknested(self, path):
696 """Determine if path is a legal nested repository."""
696 """Determine if path is a legal nested repository."""
697 if not path.startswith(self.root):
697 if not path.startswith(self.root):
698 return False
698 return False
699 subpath = path[len(self.root) + 1:]
699 subpath = path[len(self.root) + 1:]
700 normsubpath = util.pconvert(subpath)
700 normsubpath = util.pconvert(subpath)
701
701
702 # XXX: Checking against the current working copy is wrong in
702 # XXX: Checking against the current working copy is wrong in
703 # the sense that it can reject things like
703 # the sense that it can reject things like
704 #
704 #
705 # $ hg cat -r 10 sub/x.txt
705 # $ hg cat -r 10 sub/x.txt
706 #
706 #
707 # if sub/ is no longer a subrepository in the working copy
707 # if sub/ is no longer a subrepository in the working copy
708 # parent revision.
708 # parent revision.
709 #
709 #
710 # However, it can of course also allow things that would have
710 # However, it can of course also allow things that would have
711 # been rejected before, such as the above cat command if sub/
711 # been rejected before, such as the above cat command if sub/
712 # is a subrepository now, but was a normal directory before.
712 # is a subrepository now, but was a normal directory before.
713 # The old path auditor would have rejected by mistake since it
713 # The old path auditor would have rejected by mistake since it
714 # panics when it sees sub/.hg/.
714 # panics when it sees sub/.hg/.
715 #
715 #
716 # All in all, checking against the working copy seems sensible
716 # All in all, checking against the working copy seems sensible
717 # since we want to prevent access to nested repositories on
717 # since we want to prevent access to nested repositories on
718 # the filesystem *now*.
718 # the filesystem *now*.
719 ctx = self[None]
719 ctx = self[None]
720 parts = util.splitpath(subpath)
720 parts = util.splitpath(subpath)
721 while parts:
721 while parts:
722 prefix = '/'.join(parts)
722 prefix = '/'.join(parts)
723 if prefix in ctx.substate:
723 if prefix in ctx.substate:
724 if prefix == normsubpath:
724 if prefix == normsubpath:
725 return True
725 return True
726 else:
726 else:
727 sub = ctx.sub(prefix)
727 sub = ctx.sub(prefix)
728 return sub.checknested(subpath[len(prefix) + 1:])
728 return sub.checknested(subpath[len(prefix) + 1:])
729 else:
729 else:
730 parts.pop()
730 parts.pop()
731 return False
731 return False
732
732
733 def peer(self):
733 def peer(self):
734 return localpeer(self) # not cached to avoid reference cycle
734 return localpeer(self) # not cached to avoid reference cycle
735
735
736 def unfiltered(self):
736 def unfiltered(self):
737 """Return unfiltered version of the repository
737 """Return unfiltered version of the repository
738
738
739 Intended to be overwritten by filtered repo."""
739 Intended to be overwritten by filtered repo."""
740 return self
740 return self
741
741
742 def filtered(self, name, visibilityexceptions=None):
742 def filtered(self, name, visibilityexceptions=None):
743 """Return a filtered version of a repository"""
743 """Return a filtered version of a repository"""
744 cls = repoview.newtype(self.unfiltered().__class__)
744 cls = repoview.newtype(self.unfiltered().__class__)
745 return cls(self, name, visibilityexceptions)
745 return cls(self, name, visibilityexceptions)
746
746
747 @repofilecache('bookmarks', 'bookmarks.current')
747 @repofilecache('bookmarks', 'bookmarks.current')
748 def _bookmarks(self):
748 def _bookmarks(self):
749 return bookmarks.bmstore(self)
749 return bookmarks.bmstore(self)
750
750
751 @property
751 @property
752 def _activebookmark(self):
752 def _activebookmark(self):
753 return self._bookmarks.active
753 return self._bookmarks.active
754
754
755 # _phasesets depend on changelog. what we need is to call
755 # _phasesets depend on changelog. what we need is to call
756 # _phasecache.invalidate() if '00changelog.i' was changed, but it
756 # _phasecache.invalidate() if '00changelog.i' was changed, but it
757 # can't be easily expressed in filecache mechanism.
757 # can't be easily expressed in filecache mechanism.
758 @storecache('phaseroots', '00changelog.i')
758 @storecache('phaseroots', '00changelog.i')
759 def _phasecache(self):
759 def _phasecache(self):
760 return phases.phasecache(self, self._phasedefaults)
760 return phases.phasecache(self, self._phasedefaults)
761
761
762 @storecache('obsstore')
762 @storecache('obsstore')
763 def obsstore(self):
763 def obsstore(self):
764 return obsolete.makestore(self.ui, self)
764 return obsolete.makestore(self.ui, self)
765
765
766 @storecache('00changelog.i')
766 @storecache('00changelog.i')
767 def changelog(self):
767 def changelog(self):
768 return changelog.changelog(self.svfs,
768 return changelog.changelog(self.svfs,
769 trypending=txnutil.mayhavepending(self.root))
769 trypending=txnutil.mayhavepending(self.root))
770
770
771 def _constructmanifest(self):
771 def _constructmanifest(self):
772 # This is a temporary function while we migrate from manifest to
772 # This is a temporary function while we migrate from manifest to
773 # manifestlog. It allows bundlerepo and unionrepo to intercept the
773 # manifestlog. It allows bundlerepo and unionrepo to intercept the
774 # manifest creation.
774 # manifest creation.
775 return manifest.manifestrevlog(self.svfs)
775 return manifest.manifestrevlog(self.svfs)
776
776
777 @storecache('00manifest.i')
777 @storecache('00manifest.i')
778 def manifestlog(self):
778 def manifestlog(self):
779 return manifest.manifestlog(self.svfs, self)
779 return manifest.manifestlog(self.svfs, self)
780
780
781 @repofilecache('dirstate')
781 @repofilecache('dirstate')
782 def dirstate(self):
782 def dirstate(self):
783 sparsematchfn = lambda: sparse.matcher(self)
783 sparsematchfn = lambda: sparse.matcher(self)
784
784
785 return dirstate.dirstate(self.vfs, self.ui, self.root,
785 return dirstate.dirstate(self.vfs, self.ui, self.root,
786 self._dirstatevalidate, sparsematchfn)
786 self._dirstatevalidate, sparsematchfn)
787
787
788 def _dirstatevalidate(self, node):
788 def _dirstatevalidate(self, node):
789 try:
789 try:
790 self.changelog.rev(node)
790 self.changelog.rev(node)
791 return node
791 return node
792 except error.LookupError:
792 except error.LookupError:
793 if not self._dirstatevalidatewarned:
793 if not self._dirstatevalidatewarned:
794 self._dirstatevalidatewarned = True
794 self._dirstatevalidatewarned = True
795 self.ui.warn(_("warning: ignoring unknown"
795 self.ui.warn(_("warning: ignoring unknown"
796 " working parent %s!\n") % short(node))
796 " working parent %s!\n") % short(node))
797 return nullid
797 return nullid
798
798
799 @repofilecache(narrowspec.FILENAME)
799 @repofilecache(narrowspec.FILENAME)
800 def narrowpats(self):
800 def narrowpats(self):
801 """matcher patterns for this repository's narrowspec
801 """matcher patterns for this repository's narrowspec
802
802
803 A tuple of (includes, excludes).
803 A tuple of (includes, excludes).
804 """
804 """
805 source = self
805 source = self
806 if self.shared():
806 if self.shared():
807 from . import hg
807 from . import hg
808 source = hg.sharedreposource(self)
808 source = hg.sharedreposource(self)
809 return narrowspec.load(source)
809 return narrowspec.load(source)
810
810
811 @repofilecache(narrowspec.FILENAME)
811 @repofilecache(narrowspec.FILENAME)
812 def _narrowmatch(self):
812 def _narrowmatch(self):
813 if changegroup.NARROW_REQUIREMENT not in self.requirements:
813 if changegroup.NARROW_REQUIREMENT not in self.requirements:
814 return matchmod.always(self.root, '')
814 return matchmod.always(self.root, '')
815 include, exclude = self.narrowpats
815 include, exclude = self.narrowpats
816 return narrowspec.match(self.root, include=include, exclude=exclude)
816 return narrowspec.match(self.root, include=include, exclude=exclude)
817
817
818 # TODO(martinvonz): make this property-like instead?
818 # TODO(martinvonz): make this property-like instead?
819 def narrowmatch(self):
819 def narrowmatch(self):
820 return self._narrowmatch
820 return self._narrowmatch
821
821
822 def setnarrowpats(self, newincludes, newexcludes):
822 def setnarrowpats(self, newincludes, newexcludes):
823 target = self
823 target = self
824 if self.shared():
824 if self.shared():
825 from . import hg
825 from . import hg
826 target = hg.sharedreposource(self)
826 target = hg.sharedreposource(self)
827 narrowspec.save(target, newincludes, newexcludes)
827 narrowspec.save(target, newincludes, newexcludes)
828 self.invalidate(clearfilecache=True)
828 self.invalidate(clearfilecache=True)
829
829
830 def __getitem__(self, changeid):
830 def __getitem__(self, changeid):
831 if changeid is None:
831 if changeid is None:
832 return context.workingctx(self)
832 return context.workingctx(self)
833 if isinstance(changeid, context.basectx):
833 if isinstance(changeid, context.basectx):
834 return changeid
834 return changeid
835 if isinstance(changeid, slice):
835 if isinstance(changeid, slice):
836 # wdirrev isn't contiguous so the slice shouldn't include it
836 # wdirrev isn't contiguous so the slice shouldn't include it
837 return [context.changectx(self, i)
837 return [context.changectx(self, i)
838 for i in xrange(*changeid.indices(len(self)))
838 for i in xrange(*changeid.indices(len(self)))
839 if i not in self.changelog.filteredrevs]
839 if i not in self.changelog.filteredrevs]
840 try:
840 try:
841 return context.changectx(self, changeid)
841 return context.changectx(self, changeid)
842 except error.WdirUnsupported:
842 except error.WdirUnsupported:
843 return context.workingctx(self)
843 return context.workingctx(self)
844
844
845 def __contains__(self, changeid):
845 def __contains__(self, changeid):
846 """True if the given changeid exists
846 """True if the given changeid exists
847
847
848 error.LookupError is raised if an ambiguous node specified.
848 error.LookupError is raised if an ambiguous node specified.
849 """
849 """
850 try:
850 try:
851 self[changeid]
851 self[changeid]
852 return True
852 return True
853 except (error.RepoLookupError, error.FilteredIndexError,
853 except (error.RepoLookupError, error.FilteredIndexError,
854 error.FilteredLookupError):
854 error.FilteredLookupError):
855 return False
855 return False
856
856
857 def __nonzero__(self):
857 def __nonzero__(self):
858 return True
858 return True
859
859
860 __bool__ = __nonzero__
860 __bool__ = __nonzero__
861
861
862 def __len__(self):
862 def __len__(self):
863 # no need to pay the cost of repoview.changelog
863 # no need to pay the cost of repoview.changelog
864 unfi = self.unfiltered()
864 unfi = self.unfiltered()
865 return len(unfi.changelog)
865 return len(unfi.changelog)
866
866
867 def __iter__(self):
867 def __iter__(self):
868 return iter(self.changelog)
868 return iter(self.changelog)
869
869
870 def revs(self, expr, *args):
870 def revs(self, expr, *args):
871 '''Find revisions matching a revset.
871 '''Find revisions matching a revset.
872
872
873 The revset is specified as a string ``expr`` that may contain
873 The revset is specified as a string ``expr`` that may contain
874 %-formatting to escape certain types. See ``revsetlang.formatspec``.
874 %-formatting to escape certain types. See ``revsetlang.formatspec``.
875
875
876 Revset aliases from the configuration are not expanded. To expand
876 Revset aliases from the configuration are not expanded. To expand
877 user aliases, consider calling ``scmutil.revrange()`` or
877 user aliases, consider calling ``scmutil.revrange()`` or
878 ``repo.anyrevs([expr], user=True)``.
878 ``repo.anyrevs([expr], user=True)``.
879
879
880 Returns a revset.abstractsmartset, which is a list-like interface
880 Returns a revset.abstractsmartset, which is a list-like interface
881 that contains integer revisions.
881 that contains integer revisions.
882 '''
882 '''
883 expr = revsetlang.formatspec(expr, *args)
883 expr = revsetlang.formatspec(expr, *args)
884 m = revset.match(None, expr)
884 m = revset.match(None, expr)
885 return m(self)
885 return m(self)
886
886
887 def set(self, expr, *args):
887 def set(self, expr, *args):
888 '''Find revisions matching a revset and emit changectx instances.
888 '''Find revisions matching a revset and emit changectx instances.
889
889
890 This is a convenience wrapper around ``revs()`` that iterates the
890 This is a convenience wrapper around ``revs()`` that iterates the
891 result and is a generator of changectx instances.
891 result and is a generator of changectx instances.
892
892
893 Revset aliases from the configuration are not expanded. To expand
893 Revset aliases from the configuration are not expanded. To expand
894 user aliases, consider calling ``scmutil.revrange()``.
894 user aliases, consider calling ``scmutil.revrange()``.
895 '''
895 '''
896 for r in self.revs(expr, *args):
896 for r in self.revs(expr, *args):
897 yield self[r]
897 yield self[r]
898
898
899 def anyrevs(self, specs, user=False, localalias=None):
899 def anyrevs(self, specs, user=False, localalias=None):
900 '''Find revisions matching one of the given revsets.
900 '''Find revisions matching one of the given revsets.
901
901
902 Revset aliases from the configuration are not expanded by default. To
902 Revset aliases from the configuration are not expanded by default. To
903 expand user aliases, specify ``user=True``. To provide some local
903 expand user aliases, specify ``user=True``. To provide some local
904 definitions overriding user aliases, set ``localalias`` to
904 definitions overriding user aliases, set ``localalias`` to
905 ``{name: definitionstring}``.
905 ``{name: definitionstring}``.
906 '''
906 '''
907 if user:
907 if user:
908 m = revset.matchany(self.ui, specs, repo=self,
908 m = revset.matchany(self.ui, specs,
909 lookup=revset.lookupfn(self),
909 localalias=localalias)
910 localalias=localalias)
910 else:
911 else:
911 m = revset.matchany(None, specs, localalias=localalias)
912 m = revset.matchany(None, specs, localalias=localalias)
912 return m(self)
913 return m(self)
913
914
914 def url(self):
915 def url(self):
915 return 'file:' + self.root
916 return 'file:' + self.root
916
917
917 def hook(self, name, throw=False, **args):
918 def hook(self, name, throw=False, **args):
918 """Call a hook, passing this repo instance.
919 """Call a hook, passing this repo instance.
919
920
920 This a convenience method to aid invoking hooks. Extensions likely
921 This a convenience method to aid invoking hooks. Extensions likely
921 won't call this unless they have registered a custom hook or are
922 won't call this unless they have registered a custom hook or are
922 replacing code that is expected to call a hook.
923 replacing code that is expected to call a hook.
923 """
924 """
924 return hook.hook(self.ui, self, name, throw, **args)
925 return hook.hook(self.ui, self, name, throw, **args)
925
926
926 @filteredpropertycache
927 @filteredpropertycache
927 def _tagscache(self):
928 def _tagscache(self):
928 '''Returns a tagscache object that contains various tags related
929 '''Returns a tagscache object that contains various tags related
929 caches.'''
930 caches.'''
930
931
931 # This simplifies its cache management by having one decorated
932 # This simplifies its cache management by having one decorated
932 # function (this one) and the rest simply fetch things from it.
933 # function (this one) and the rest simply fetch things from it.
933 class tagscache(object):
934 class tagscache(object):
934 def __init__(self):
935 def __init__(self):
935 # These two define the set of tags for this repository. tags
936 # These two define the set of tags for this repository. tags
936 # maps tag name to node; tagtypes maps tag name to 'global' or
937 # maps tag name to node; tagtypes maps tag name to 'global' or
937 # 'local'. (Global tags are defined by .hgtags across all
938 # 'local'. (Global tags are defined by .hgtags across all
938 # heads, and local tags are defined in .hg/localtags.)
939 # heads, and local tags are defined in .hg/localtags.)
939 # They constitute the in-memory cache of tags.
940 # They constitute the in-memory cache of tags.
940 self.tags = self.tagtypes = None
941 self.tags = self.tagtypes = None
941
942
942 self.nodetagscache = self.tagslist = None
943 self.nodetagscache = self.tagslist = None
943
944
944 cache = tagscache()
945 cache = tagscache()
945 cache.tags, cache.tagtypes = self._findtags()
946 cache.tags, cache.tagtypes = self._findtags()
946
947
947 return cache
948 return cache
948
949
949 def tags(self):
950 def tags(self):
950 '''return a mapping of tag to node'''
951 '''return a mapping of tag to node'''
951 t = {}
952 t = {}
952 if self.changelog.filteredrevs:
953 if self.changelog.filteredrevs:
953 tags, tt = self._findtags()
954 tags, tt = self._findtags()
954 else:
955 else:
955 tags = self._tagscache.tags
956 tags = self._tagscache.tags
956 for k, v in tags.iteritems():
957 for k, v in tags.iteritems():
957 try:
958 try:
958 # ignore tags to unknown nodes
959 # ignore tags to unknown nodes
959 self.changelog.rev(v)
960 self.changelog.rev(v)
960 t[k] = v
961 t[k] = v
961 except (error.LookupError, ValueError):
962 except (error.LookupError, ValueError):
962 pass
963 pass
963 return t
964 return t
964
965
965 def _findtags(self):
966 def _findtags(self):
966 '''Do the hard work of finding tags. Return a pair of dicts
967 '''Do the hard work of finding tags. Return a pair of dicts
967 (tags, tagtypes) where tags maps tag name to node, and tagtypes
968 (tags, tagtypes) where tags maps tag name to node, and tagtypes
968 maps tag name to a string like \'global\' or \'local\'.
969 maps tag name to a string like \'global\' or \'local\'.
969 Subclasses or extensions are free to add their own tags, but
970 Subclasses or extensions are free to add their own tags, but
970 should be aware that the returned dicts will be retained for the
971 should be aware that the returned dicts will be retained for the
971 duration of the localrepo object.'''
972 duration of the localrepo object.'''
972
973
973 # XXX what tagtype should subclasses/extensions use? Currently
974 # XXX what tagtype should subclasses/extensions use? Currently
974 # mq and bookmarks add tags, but do not set the tagtype at all.
975 # mq and bookmarks add tags, but do not set the tagtype at all.
975 # Should each extension invent its own tag type? Should there
976 # Should each extension invent its own tag type? Should there
976 # be one tagtype for all such "virtual" tags? Or is the status
977 # be one tagtype for all such "virtual" tags? Or is the status
977 # quo fine?
978 # quo fine?
978
979
979
980
980 # map tag name to (node, hist)
981 # map tag name to (node, hist)
981 alltags = tagsmod.findglobaltags(self.ui, self)
982 alltags = tagsmod.findglobaltags(self.ui, self)
982 # map tag name to tag type
983 # map tag name to tag type
983 tagtypes = dict((tag, 'global') for tag in alltags)
984 tagtypes = dict((tag, 'global') for tag in alltags)
984
985
985 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
986 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
986
987
987 # Build the return dicts. Have to re-encode tag names because
988 # Build the return dicts. Have to re-encode tag names because
988 # the tags module always uses UTF-8 (in order not to lose info
989 # the tags module always uses UTF-8 (in order not to lose info
989 # writing to the cache), but the rest of Mercurial wants them in
990 # writing to the cache), but the rest of Mercurial wants them in
990 # local encoding.
991 # local encoding.
991 tags = {}
992 tags = {}
992 for (name, (node, hist)) in alltags.iteritems():
993 for (name, (node, hist)) in alltags.iteritems():
993 if node != nullid:
994 if node != nullid:
994 tags[encoding.tolocal(name)] = node
995 tags[encoding.tolocal(name)] = node
995 tags['tip'] = self.changelog.tip()
996 tags['tip'] = self.changelog.tip()
996 tagtypes = dict([(encoding.tolocal(name), value)
997 tagtypes = dict([(encoding.tolocal(name), value)
997 for (name, value) in tagtypes.iteritems()])
998 for (name, value) in tagtypes.iteritems()])
998 return (tags, tagtypes)
999 return (tags, tagtypes)
999
1000
1000 def tagtype(self, tagname):
1001 def tagtype(self, tagname):
1001 '''
1002 '''
1002 return the type of the given tag. result can be:
1003 return the type of the given tag. result can be:
1003
1004
1004 'local' : a local tag
1005 'local' : a local tag
1005 'global' : a global tag
1006 'global' : a global tag
1006 None : tag does not exist
1007 None : tag does not exist
1007 '''
1008 '''
1008
1009
1009 return self._tagscache.tagtypes.get(tagname)
1010 return self._tagscache.tagtypes.get(tagname)
1010
1011
1011 def tagslist(self):
1012 def tagslist(self):
1012 '''return a list of tags ordered by revision'''
1013 '''return a list of tags ordered by revision'''
1013 if not self._tagscache.tagslist:
1014 if not self._tagscache.tagslist:
1014 l = []
1015 l = []
1015 for t, n in self.tags().iteritems():
1016 for t, n in self.tags().iteritems():
1016 l.append((self.changelog.rev(n), t, n))
1017 l.append((self.changelog.rev(n), t, n))
1017 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1018 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1018
1019
1019 return self._tagscache.tagslist
1020 return self._tagscache.tagslist
1020
1021
1021 def nodetags(self, node):
1022 def nodetags(self, node):
1022 '''return the tags associated with a node'''
1023 '''return the tags associated with a node'''
1023 if not self._tagscache.nodetagscache:
1024 if not self._tagscache.nodetagscache:
1024 nodetagscache = {}
1025 nodetagscache = {}
1025 for t, n in self._tagscache.tags.iteritems():
1026 for t, n in self._tagscache.tags.iteritems():
1026 nodetagscache.setdefault(n, []).append(t)
1027 nodetagscache.setdefault(n, []).append(t)
1027 for tags in nodetagscache.itervalues():
1028 for tags in nodetagscache.itervalues():
1028 tags.sort()
1029 tags.sort()
1029 self._tagscache.nodetagscache = nodetagscache
1030 self._tagscache.nodetagscache = nodetagscache
1030 return self._tagscache.nodetagscache.get(node, [])
1031 return self._tagscache.nodetagscache.get(node, [])
1031
1032
1032 def nodebookmarks(self, node):
1033 def nodebookmarks(self, node):
1033 """return the list of bookmarks pointing to the specified node"""
1034 """return the list of bookmarks pointing to the specified node"""
1034 marks = []
1035 marks = []
1035 for bookmark, n in self._bookmarks.iteritems():
1036 for bookmark, n in self._bookmarks.iteritems():
1036 if n == node:
1037 if n == node:
1037 marks.append(bookmark)
1038 marks.append(bookmark)
1038 return sorted(marks)
1039 return sorted(marks)
1039
1040
1040 def branchmap(self):
1041 def branchmap(self):
1041 '''returns a dictionary {branch: [branchheads]} with branchheads
1042 '''returns a dictionary {branch: [branchheads]} with branchheads
1042 ordered by increasing revision number'''
1043 ordered by increasing revision number'''
1043 branchmap.updatecache(self)
1044 branchmap.updatecache(self)
1044 return self._branchcaches[self.filtername]
1045 return self._branchcaches[self.filtername]
1045
1046
1046 @unfilteredmethod
1047 @unfilteredmethod
1047 def revbranchcache(self):
1048 def revbranchcache(self):
1048 if not self._revbranchcache:
1049 if not self._revbranchcache:
1049 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1050 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1050 return self._revbranchcache
1051 return self._revbranchcache
1051
1052
1052 def branchtip(self, branch, ignoremissing=False):
1053 def branchtip(self, branch, ignoremissing=False):
1053 '''return the tip node for a given branch
1054 '''return the tip node for a given branch
1054
1055
1055 If ignoremissing is True, then this method will not raise an error.
1056 If ignoremissing is True, then this method will not raise an error.
1056 This is helpful for callers that only expect None for a missing branch
1057 This is helpful for callers that only expect None for a missing branch
1057 (e.g. namespace).
1058 (e.g. namespace).
1058
1059
1059 '''
1060 '''
1060 try:
1061 try:
1061 return self.branchmap().branchtip(branch)
1062 return self.branchmap().branchtip(branch)
1062 except KeyError:
1063 except KeyError:
1063 if not ignoremissing:
1064 if not ignoremissing:
1064 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1065 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1065 else:
1066 else:
1066 pass
1067 pass
1067
1068
1068 def lookup(self, key):
1069 def lookup(self, key):
1069 return scmutil.revsymbol(self, key).node()
1070 return scmutil.revsymbol(self, key).node()
1070
1071
1071 def lookupbranch(self, key):
1072 def lookupbranch(self, key):
1072 if key in self.branchmap():
1073 if key in self.branchmap():
1073 return key
1074 return key
1074
1075
1075 return scmutil.revsymbol(self, key).branch()
1076 return scmutil.revsymbol(self, key).branch()
1076
1077
1077 def known(self, nodes):
1078 def known(self, nodes):
1078 cl = self.changelog
1079 cl = self.changelog
1079 nm = cl.nodemap
1080 nm = cl.nodemap
1080 filtered = cl.filteredrevs
1081 filtered = cl.filteredrevs
1081 result = []
1082 result = []
1082 for n in nodes:
1083 for n in nodes:
1083 r = nm.get(n)
1084 r = nm.get(n)
1084 resp = not (r is None or r in filtered)
1085 resp = not (r is None or r in filtered)
1085 result.append(resp)
1086 result.append(resp)
1086 return result
1087 return result
1087
1088
1088 def local(self):
1089 def local(self):
1089 return self
1090 return self
1090
1091
1091 def publishing(self):
1092 def publishing(self):
1092 # it's safe (and desirable) to trust the publish flag unconditionally
1093 # it's safe (and desirable) to trust the publish flag unconditionally
1093 # so that we don't finalize changes shared between users via ssh or nfs
1094 # so that we don't finalize changes shared between users via ssh or nfs
1094 return self.ui.configbool('phases', 'publish', untrusted=True)
1095 return self.ui.configbool('phases', 'publish', untrusted=True)
1095
1096
1096 def cancopy(self):
1097 def cancopy(self):
1097 # so statichttprepo's override of local() works
1098 # so statichttprepo's override of local() works
1098 if not self.local():
1099 if not self.local():
1099 return False
1100 return False
1100 if not self.publishing():
1101 if not self.publishing():
1101 return True
1102 return True
1102 # if publishing we can't copy if there is filtered content
1103 # if publishing we can't copy if there is filtered content
1103 return not self.filtered('visible').changelog.filteredrevs
1104 return not self.filtered('visible').changelog.filteredrevs
1104
1105
1105 def shared(self):
1106 def shared(self):
1106 '''the type of shared repository (None if not shared)'''
1107 '''the type of shared repository (None if not shared)'''
1107 if self.sharedpath != self.path:
1108 if self.sharedpath != self.path:
1108 return 'store'
1109 return 'store'
1109 return None
1110 return None
1110
1111
1111 def wjoin(self, f, *insidef):
1112 def wjoin(self, f, *insidef):
1112 return self.vfs.reljoin(self.root, f, *insidef)
1113 return self.vfs.reljoin(self.root, f, *insidef)
1113
1114
1114 def file(self, f):
1115 def file(self, f):
1115 if f[0] == '/':
1116 if f[0] == '/':
1116 f = f[1:]
1117 f = f[1:]
1117 return filelog.filelog(self.svfs, f)
1118 return filelog.filelog(self.svfs, f)
1118
1119
1119 def setparents(self, p1, p2=nullid):
1120 def setparents(self, p1, p2=nullid):
1120 with self.dirstate.parentchange():
1121 with self.dirstate.parentchange():
1121 copies = self.dirstate.setparents(p1, p2)
1122 copies = self.dirstate.setparents(p1, p2)
1122 pctx = self[p1]
1123 pctx = self[p1]
1123 if copies:
1124 if copies:
1124 # Adjust copy records, the dirstate cannot do it, it
1125 # Adjust copy records, the dirstate cannot do it, it
1125 # requires access to parents manifests. Preserve them
1126 # requires access to parents manifests. Preserve them
1126 # only for entries added to first parent.
1127 # only for entries added to first parent.
1127 for f in copies:
1128 for f in copies:
1128 if f not in pctx and copies[f] in pctx:
1129 if f not in pctx and copies[f] in pctx:
1129 self.dirstate.copy(copies[f], f)
1130 self.dirstate.copy(copies[f], f)
1130 if p2 == nullid:
1131 if p2 == nullid:
1131 for f, s in sorted(self.dirstate.copies().items()):
1132 for f, s in sorted(self.dirstate.copies().items()):
1132 if f not in pctx and s not in pctx:
1133 if f not in pctx and s not in pctx:
1133 self.dirstate.copy(None, f)
1134 self.dirstate.copy(None, f)
1134
1135
1135 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1136 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1136 """changeid can be a changeset revision, node, or tag.
1137 """changeid can be a changeset revision, node, or tag.
1137 fileid can be a file revision or node."""
1138 fileid can be a file revision or node."""
1138 return context.filectx(self, path, changeid, fileid,
1139 return context.filectx(self, path, changeid, fileid,
1139 changectx=changectx)
1140 changectx=changectx)
1140
1141
1141 def getcwd(self):
1142 def getcwd(self):
1142 return self.dirstate.getcwd()
1143 return self.dirstate.getcwd()
1143
1144
1144 def pathto(self, f, cwd=None):
1145 def pathto(self, f, cwd=None):
1145 return self.dirstate.pathto(f, cwd)
1146 return self.dirstate.pathto(f, cwd)
1146
1147
1147 def _loadfilter(self, filter):
1148 def _loadfilter(self, filter):
1148 if filter not in self._filterpats:
1149 if filter not in self._filterpats:
1149 l = []
1150 l = []
1150 for pat, cmd in self.ui.configitems(filter):
1151 for pat, cmd in self.ui.configitems(filter):
1151 if cmd == '!':
1152 if cmd == '!':
1152 continue
1153 continue
1153 mf = matchmod.match(self.root, '', [pat])
1154 mf = matchmod.match(self.root, '', [pat])
1154 fn = None
1155 fn = None
1155 params = cmd
1156 params = cmd
1156 for name, filterfn in self._datafilters.iteritems():
1157 for name, filterfn in self._datafilters.iteritems():
1157 if cmd.startswith(name):
1158 if cmd.startswith(name):
1158 fn = filterfn
1159 fn = filterfn
1159 params = cmd[len(name):].lstrip()
1160 params = cmd[len(name):].lstrip()
1160 break
1161 break
1161 if not fn:
1162 if not fn:
1162 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1163 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1163 # Wrap old filters not supporting keyword arguments
1164 # Wrap old filters not supporting keyword arguments
1164 if not pycompat.getargspec(fn)[2]:
1165 if not pycompat.getargspec(fn)[2]:
1165 oldfn = fn
1166 oldfn = fn
1166 fn = lambda s, c, **kwargs: oldfn(s, c)
1167 fn = lambda s, c, **kwargs: oldfn(s, c)
1167 l.append((mf, fn, params))
1168 l.append((mf, fn, params))
1168 self._filterpats[filter] = l
1169 self._filterpats[filter] = l
1169 return self._filterpats[filter]
1170 return self._filterpats[filter]
1170
1171
1171 def _filter(self, filterpats, filename, data):
1172 def _filter(self, filterpats, filename, data):
1172 for mf, fn, cmd in filterpats:
1173 for mf, fn, cmd in filterpats:
1173 if mf(filename):
1174 if mf(filename):
1174 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1175 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1175 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1176 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1176 break
1177 break
1177
1178
1178 return data
1179 return data
1179
1180
1180 @unfilteredpropertycache
1181 @unfilteredpropertycache
1181 def _encodefilterpats(self):
1182 def _encodefilterpats(self):
1182 return self._loadfilter('encode')
1183 return self._loadfilter('encode')
1183
1184
1184 @unfilteredpropertycache
1185 @unfilteredpropertycache
1185 def _decodefilterpats(self):
1186 def _decodefilterpats(self):
1186 return self._loadfilter('decode')
1187 return self._loadfilter('decode')
1187
1188
1188 def adddatafilter(self, name, filter):
1189 def adddatafilter(self, name, filter):
1189 self._datafilters[name] = filter
1190 self._datafilters[name] = filter
1190
1191
1191 def wread(self, filename):
1192 def wread(self, filename):
1192 if self.wvfs.islink(filename):
1193 if self.wvfs.islink(filename):
1193 data = self.wvfs.readlink(filename)
1194 data = self.wvfs.readlink(filename)
1194 else:
1195 else:
1195 data = self.wvfs.read(filename)
1196 data = self.wvfs.read(filename)
1196 return self._filter(self._encodefilterpats, filename, data)
1197 return self._filter(self._encodefilterpats, filename, data)
1197
1198
1198 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1199 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1199 """write ``data`` into ``filename`` in the working directory
1200 """write ``data`` into ``filename`` in the working directory
1200
1201
1201 This returns length of written (maybe decoded) data.
1202 This returns length of written (maybe decoded) data.
1202 """
1203 """
1203 data = self._filter(self._decodefilterpats, filename, data)
1204 data = self._filter(self._decodefilterpats, filename, data)
1204 if 'l' in flags:
1205 if 'l' in flags:
1205 self.wvfs.symlink(data, filename)
1206 self.wvfs.symlink(data, filename)
1206 else:
1207 else:
1207 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1208 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1208 **kwargs)
1209 **kwargs)
1209 if 'x' in flags:
1210 if 'x' in flags:
1210 self.wvfs.setflags(filename, False, True)
1211 self.wvfs.setflags(filename, False, True)
1211 else:
1212 else:
1212 self.wvfs.setflags(filename, False, False)
1213 self.wvfs.setflags(filename, False, False)
1213 return len(data)
1214 return len(data)
1214
1215
1215 def wwritedata(self, filename, data):
1216 def wwritedata(self, filename, data):
1216 return self._filter(self._decodefilterpats, filename, data)
1217 return self._filter(self._decodefilterpats, filename, data)
1217
1218
1218 def currenttransaction(self):
1219 def currenttransaction(self):
1219 """return the current transaction or None if non exists"""
1220 """return the current transaction or None if non exists"""
1220 if self._transref:
1221 if self._transref:
1221 tr = self._transref()
1222 tr = self._transref()
1222 else:
1223 else:
1223 tr = None
1224 tr = None
1224
1225
1225 if tr and tr.running():
1226 if tr and tr.running():
1226 return tr
1227 return tr
1227 return None
1228 return None
1228
1229
1229 def transaction(self, desc, report=None):
1230 def transaction(self, desc, report=None):
1230 if (self.ui.configbool('devel', 'all-warnings')
1231 if (self.ui.configbool('devel', 'all-warnings')
1231 or self.ui.configbool('devel', 'check-locks')):
1232 or self.ui.configbool('devel', 'check-locks')):
1232 if self._currentlock(self._lockref) is None:
1233 if self._currentlock(self._lockref) is None:
1233 raise error.ProgrammingError('transaction requires locking')
1234 raise error.ProgrammingError('transaction requires locking')
1234 tr = self.currenttransaction()
1235 tr = self.currenttransaction()
1235 if tr is not None:
1236 if tr is not None:
1236 return tr.nest(name=desc)
1237 return tr.nest(name=desc)
1237
1238
1238 # abort here if the journal already exists
1239 # abort here if the journal already exists
1239 if self.svfs.exists("journal"):
1240 if self.svfs.exists("journal"):
1240 raise error.RepoError(
1241 raise error.RepoError(
1241 _("abandoned transaction found"),
1242 _("abandoned transaction found"),
1242 hint=_("run 'hg recover' to clean up transaction"))
1243 hint=_("run 'hg recover' to clean up transaction"))
1243
1244
1244 idbase = "%.40f#%f" % (random.random(), time.time())
1245 idbase = "%.40f#%f" % (random.random(), time.time())
1245 ha = hex(hashlib.sha1(idbase).digest())
1246 ha = hex(hashlib.sha1(idbase).digest())
1246 txnid = 'TXN:' + ha
1247 txnid = 'TXN:' + ha
1247 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1248 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1248
1249
1249 self._writejournal(desc)
1250 self._writejournal(desc)
1250 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1251 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1251 if report:
1252 if report:
1252 rp = report
1253 rp = report
1253 else:
1254 else:
1254 rp = self.ui.warn
1255 rp = self.ui.warn
1255 vfsmap = {'plain': self.vfs} # root of .hg/
1256 vfsmap = {'plain': self.vfs} # root of .hg/
1256 # we must avoid cyclic reference between repo and transaction.
1257 # we must avoid cyclic reference between repo and transaction.
1257 reporef = weakref.ref(self)
1258 reporef = weakref.ref(self)
1258 # Code to track tag movement
1259 # Code to track tag movement
1259 #
1260 #
1260 # Since tags are all handled as file content, it is actually quite hard
1261 # Since tags are all handled as file content, it is actually quite hard
1261 # to track these movement from a code perspective. So we fallback to a
1262 # to track these movement from a code perspective. So we fallback to a
1262 # tracking at the repository level. One could envision to track changes
1263 # tracking at the repository level. One could envision to track changes
1263 # to the '.hgtags' file through changegroup apply but that fails to
1264 # to the '.hgtags' file through changegroup apply but that fails to
1264 # cope with case where transaction expose new heads without changegroup
1265 # cope with case where transaction expose new heads without changegroup
1265 # being involved (eg: phase movement).
1266 # being involved (eg: phase movement).
1266 #
1267 #
1267 # For now, We gate the feature behind a flag since this likely comes
1268 # For now, We gate the feature behind a flag since this likely comes
1268 # with performance impacts. The current code run more often than needed
1269 # with performance impacts. The current code run more often than needed
1269 # and do not use caches as much as it could. The current focus is on
1270 # and do not use caches as much as it could. The current focus is on
1270 # the behavior of the feature so we disable it by default. The flag
1271 # the behavior of the feature so we disable it by default. The flag
1271 # will be removed when we are happy with the performance impact.
1272 # will be removed when we are happy with the performance impact.
1272 #
1273 #
1273 # Once this feature is no longer experimental move the following
1274 # Once this feature is no longer experimental move the following
1274 # documentation to the appropriate help section:
1275 # documentation to the appropriate help section:
1275 #
1276 #
1276 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1277 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1277 # tags (new or changed or deleted tags). In addition the details of
1278 # tags (new or changed or deleted tags). In addition the details of
1278 # these changes are made available in a file at:
1279 # these changes are made available in a file at:
1279 # ``REPOROOT/.hg/changes/tags.changes``.
1280 # ``REPOROOT/.hg/changes/tags.changes``.
1280 # Make sure you check for HG_TAG_MOVED before reading that file as it
1281 # Make sure you check for HG_TAG_MOVED before reading that file as it
1281 # might exist from a previous transaction even if no tag were touched
1282 # might exist from a previous transaction even if no tag were touched
1282 # in this one. Changes are recorded in a line base format::
1283 # in this one. Changes are recorded in a line base format::
1283 #
1284 #
1284 # <action> <hex-node> <tag-name>\n
1285 # <action> <hex-node> <tag-name>\n
1285 #
1286 #
1286 # Actions are defined as follow:
1287 # Actions are defined as follow:
1287 # "-R": tag is removed,
1288 # "-R": tag is removed,
1288 # "+A": tag is added,
1289 # "+A": tag is added,
1289 # "-M": tag is moved (old value),
1290 # "-M": tag is moved (old value),
1290 # "+M": tag is moved (new value),
1291 # "+M": tag is moved (new value),
1291 tracktags = lambda x: None
1292 tracktags = lambda x: None
1292 # experimental config: experimental.hook-track-tags
1293 # experimental config: experimental.hook-track-tags
1293 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1294 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1294 if desc != 'strip' and shouldtracktags:
1295 if desc != 'strip' and shouldtracktags:
1295 oldheads = self.changelog.headrevs()
1296 oldheads = self.changelog.headrevs()
1296 def tracktags(tr2):
1297 def tracktags(tr2):
1297 repo = reporef()
1298 repo = reporef()
1298 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1299 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1299 newheads = repo.changelog.headrevs()
1300 newheads = repo.changelog.headrevs()
1300 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1301 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1301 # notes: we compare lists here.
1302 # notes: we compare lists here.
1302 # As we do it only once buiding set would not be cheaper
1303 # As we do it only once buiding set would not be cheaper
1303 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1304 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1304 if changes:
1305 if changes:
1305 tr2.hookargs['tag_moved'] = '1'
1306 tr2.hookargs['tag_moved'] = '1'
1306 with repo.vfs('changes/tags.changes', 'w',
1307 with repo.vfs('changes/tags.changes', 'w',
1307 atomictemp=True) as changesfile:
1308 atomictemp=True) as changesfile:
1308 # note: we do not register the file to the transaction
1309 # note: we do not register the file to the transaction
1309 # because we needs it to still exist on the transaction
1310 # because we needs it to still exist on the transaction
1310 # is close (for txnclose hooks)
1311 # is close (for txnclose hooks)
1311 tagsmod.writediff(changesfile, changes)
1312 tagsmod.writediff(changesfile, changes)
1312 def validate(tr2):
1313 def validate(tr2):
1313 """will run pre-closing hooks"""
1314 """will run pre-closing hooks"""
1314 # XXX the transaction API is a bit lacking here so we take a hacky
1315 # XXX the transaction API is a bit lacking here so we take a hacky
1315 # path for now
1316 # path for now
1316 #
1317 #
1317 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
1318 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
1318 # dict is copied before these run. In addition we needs the data
1319 # dict is copied before these run. In addition we needs the data
1319 # available to in memory hooks too.
1320 # available to in memory hooks too.
1320 #
1321 #
1321 # Moreover, we also need to make sure this runs before txnclose
1322 # Moreover, we also need to make sure this runs before txnclose
1322 # hooks and there is no "pending" mechanism that would execute
1323 # hooks and there is no "pending" mechanism that would execute
1323 # logic only if hooks are about to run.
1324 # logic only if hooks are about to run.
1324 #
1325 #
1325 # Fixing this limitation of the transaction is also needed to track
1326 # Fixing this limitation of the transaction is also needed to track
1326 # other families of changes (bookmarks, phases, obsolescence).
1327 # other families of changes (bookmarks, phases, obsolescence).
1327 #
1328 #
1328 # This will have to be fixed before we remove the experimental
1329 # This will have to be fixed before we remove the experimental
1329 # gating.
1330 # gating.
1330 tracktags(tr2)
1331 tracktags(tr2)
1331 repo = reporef()
1332 repo = reporef()
1332 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1333 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1333 scmutil.enforcesinglehead(repo, tr2, desc)
1334 scmutil.enforcesinglehead(repo, tr2, desc)
1334 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1335 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1335 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1336 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1336 args = tr.hookargs.copy()
1337 args = tr.hookargs.copy()
1337 args.update(bookmarks.preparehookargs(name, old, new))
1338 args.update(bookmarks.preparehookargs(name, old, new))
1338 repo.hook('pretxnclose-bookmark', throw=True,
1339 repo.hook('pretxnclose-bookmark', throw=True,
1339 txnname=desc,
1340 txnname=desc,
1340 **pycompat.strkwargs(args))
1341 **pycompat.strkwargs(args))
1341 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1342 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1342 cl = repo.unfiltered().changelog
1343 cl = repo.unfiltered().changelog
1343 for rev, (old, new) in tr.changes['phases'].items():
1344 for rev, (old, new) in tr.changes['phases'].items():
1344 args = tr.hookargs.copy()
1345 args = tr.hookargs.copy()
1345 node = hex(cl.node(rev))
1346 node = hex(cl.node(rev))
1346 args.update(phases.preparehookargs(node, old, new))
1347 args.update(phases.preparehookargs(node, old, new))
1347 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1348 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1348 **pycompat.strkwargs(args))
1349 **pycompat.strkwargs(args))
1349
1350
1350 repo.hook('pretxnclose', throw=True,
1351 repo.hook('pretxnclose', throw=True,
1351 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1352 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1352 def releasefn(tr, success):
1353 def releasefn(tr, success):
1353 repo = reporef()
1354 repo = reporef()
1354 if success:
1355 if success:
1355 # this should be explicitly invoked here, because
1356 # this should be explicitly invoked here, because
1356 # in-memory changes aren't written out at closing
1357 # in-memory changes aren't written out at closing
1357 # transaction, if tr.addfilegenerator (via
1358 # transaction, if tr.addfilegenerator (via
1358 # dirstate.write or so) isn't invoked while
1359 # dirstate.write or so) isn't invoked while
1359 # transaction running
1360 # transaction running
1360 repo.dirstate.write(None)
1361 repo.dirstate.write(None)
1361 else:
1362 else:
1362 # discard all changes (including ones already written
1363 # discard all changes (including ones already written
1363 # out) in this transaction
1364 # out) in this transaction
1364 repo.dirstate.restorebackup(None, 'journal.dirstate')
1365 repo.dirstate.restorebackup(None, 'journal.dirstate')
1365
1366
1366 repo.invalidate(clearfilecache=True)
1367 repo.invalidate(clearfilecache=True)
1367
1368
1368 tr = transaction.transaction(rp, self.svfs, vfsmap,
1369 tr = transaction.transaction(rp, self.svfs, vfsmap,
1369 "journal",
1370 "journal",
1370 "undo",
1371 "undo",
1371 aftertrans(renames),
1372 aftertrans(renames),
1372 self.store.createmode,
1373 self.store.createmode,
1373 validator=validate,
1374 validator=validate,
1374 releasefn=releasefn,
1375 releasefn=releasefn,
1375 checkambigfiles=_cachedfiles,
1376 checkambigfiles=_cachedfiles,
1376 name=desc)
1377 name=desc)
1377 tr.changes['revs'] = xrange(0, 0)
1378 tr.changes['revs'] = xrange(0, 0)
1378 tr.changes['obsmarkers'] = set()
1379 tr.changes['obsmarkers'] = set()
1379 tr.changes['phases'] = {}
1380 tr.changes['phases'] = {}
1380 tr.changes['bookmarks'] = {}
1381 tr.changes['bookmarks'] = {}
1381
1382
1382 tr.hookargs['txnid'] = txnid
1383 tr.hookargs['txnid'] = txnid
1383 # note: writing the fncache only during finalize mean that the file is
1384 # note: writing the fncache only during finalize mean that the file is
1384 # outdated when running hooks. As fncache is used for streaming clone,
1385 # outdated when running hooks. As fncache is used for streaming clone,
1385 # this is not expected to break anything that happen during the hooks.
1386 # this is not expected to break anything that happen during the hooks.
1386 tr.addfinalize('flush-fncache', self.store.write)
1387 tr.addfinalize('flush-fncache', self.store.write)
1387 def txnclosehook(tr2):
1388 def txnclosehook(tr2):
1388 """To be run if transaction is successful, will schedule a hook run
1389 """To be run if transaction is successful, will schedule a hook run
1389 """
1390 """
1390 # Don't reference tr2 in hook() so we don't hold a reference.
1391 # Don't reference tr2 in hook() so we don't hold a reference.
1391 # This reduces memory consumption when there are multiple
1392 # This reduces memory consumption when there are multiple
1392 # transactions per lock. This can likely go away if issue5045
1393 # transactions per lock. This can likely go away if issue5045
1393 # fixes the function accumulation.
1394 # fixes the function accumulation.
1394 hookargs = tr2.hookargs
1395 hookargs = tr2.hookargs
1395
1396
1396 def hookfunc():
1397 def hookfunc():
1397 repo = reporef()
1398 repo = reporef()
1398 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1399 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1399 bmchanges = sorted(tr.changes['bookmarks'].items())
1400 bmchanges = sorted(tr.changes['bookmarks'].items())
1400 for name, (old, new) in bmchanges:
1401 for name, (old, new) in bmchanges:
1401 args = tr.hookargs.copy()
1402 args = tr.hookargs.copy()
1402 args.update(bookmarks.preparehookargs(name, old, new))
1403 args.update(bookmarks.preparehookargs(name, old, new))
1403 repo.hook('txnclose-bookmark', throw=False,
1404 repo.hook('txnclose-bookmark', throw=False,
1404 txnname=desc, **pycompat.strkwargs(args))
1405 txnname=desc, **pycompat.strkwargs(args))
1405
1406
1406 if hook.hashook(repo.ui, 'txnclose-phase'):
1407 if hook.hashook(repo.ui, 'txnclose-phase'):
1407 cl = repo.unfiltered().changelog
1408 cl = repo.unfiltered().changelog
1408 phasemv = sorted(tr.changes['phases'].items())
1409 phasemv = sorted(tr.changes['phases'].items())
1409 for rev, (old, new) in phasemv:
1410 for rev, (old, new) in phasemv:
1410 args = tr.hookargs.copy()
1411 args = tr.hookargs.copy()
1411 node = hex(cl.node(rev))
1412 node = hex(cl.node(rev))
1412 args.update(phases.preparehookargs(node, old, new))
1413 args.update(phases.preparehookargs(node, old, new))
1413 repo.hook('txnclose-phase', throw=False, txnname=desc,
1414 repo.hook('txnclose-phase', throw=False, txnname=desc,
1414 **pycompat.strkwargs(args))
1415 **pycompat.strkwargs(args))
1415
1416
1416 repo.hook('txnclose', throw=False, txnname=desc,
1417 repo.hook('txnclose', throw=False, txnname=desc,
1417 **pycompat.strkwargs(hookargs))
1418 **pycompat.strkwargs(hookargs))
1418 reporef()._afterlock(hookfunc)
1419 reporef()._afterlock(hookfunc)
1419 tr.addfinalize('txnclose-hook', txnclosehook)
1420 tr.addfinalize('txnclose-hook', txnclosehook)
1420 # Include a leading "-" to make it happen before the transaction summary
1421 # Include a leading "-" to make it happen before the transaction summary
1421 # reports registered via scmutil.registersummarycallback() whose names
1422 # reports registered via scmutil.registersummarycallback() whose names
1422 # are 00-txnreport etc. That way, the caches will be warm when the
1423 # are 00-txnreport etc. That way, the caches will be warm when the
1423 # callbacks run.
1424 # callbacks run.
1424 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1425 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1425 def txnaborthook(tr2):
1426 def txnaborthook(tr2):
1426 """To be run if transaction is aborted
1427 """To be run if transaction is aborted
1427 """
1428 """
1428 reporef().hook('txnabort', throw=False, txnname=desc,
1429 reporef().hook('txnabort', throw=False, txnname=desc,
1429 **pycompat.strkwargs(tr2.hookargs))
1430 **pycompat.strkwargs(tr2.hookargs))
1430 tr.addabort('txnabort-hook', txnaborthook)
1431 tr.addabort('txnabort-hook', txnaborthook)
1431 # avoid eager cache invalidation. in-memory data should be identical
1432 # avoid eager cache invalidation. in-memory data should be identical
1432 # to stored data if transaction has no error.
1433 # to stored data if transaction has no error.
1433 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1434 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1434 self._transref = weakref.ref(tr)
1435 self._transref = weakref.ref(tr)
1435 scmutil.registersummarycallback(self, tr, desc)
1436 scmutil.registersummarycallback(self, tr, desc)
1436 return tr
1437 return tr
1437
1438
1438 def _journalfiles(self):
1439 def _journalfiles(self):
1439 return ((self.svfs, 'journal'),
1440 return ((self.svfs, 'journal'),
1440 (self.vfs, 'journal.dirstate'),
1441 (self.vfs, 'journal.dirstate'),
1441 (self.vfs, 'journal.branch'),
1442 (self.vfs, 'journal.branch'),
1442 (self.vfs, 'journal.desc'),
1443 (self.vfs, 'journal.desc'),
1443 (self.vfs, 'journal.bookmarks'),
1444 (self.vfs, 'journal.bookmarks'),
1444 (self.svfs, 'journal.phaseroots'))
1445 (self.svfs, 'journal.phaseroots'))
1445
1446
1446 def undofiles(self):
1447 def undofiles(self):
1447 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1448 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1448
1449
1449 @unfilteredmethod
1450 @unfilteredmethod
1450 def _writejournal(self, desc):
1451 def _writejournal(self, desc):
1451 self.dirstate.savebackup(None, 'journal.dirstate')
1452 self.dirstate.savebackup(None, 'journal.dirstate')
1452 self.vfs.write("journal.branch",
1453 self.vfs.write("journal.branch",
1453 encoding.fromlocal(self.dirstate.branch()))
1454 encoding.fromlocal(self.dirstate.branch()))
1454 self.vfs.write("journal.desc",
1455 self.vfs.write("journal.desc",
1455 "%d\n%s\n" % (len(self), desc))
1456 "%d\n%s\n" % (len(self), desc))
1456 self.vfs.write("journal.bookmarks",
1457 self.vfs.write("journal.bookmarks",
1457 self.vfs.tryread("bookmarks"))
1458 self.vfs.tryread("bookmarks"))
1458 self.svfs.write("journal.phaseroots",
1459 self.svfs.write("journal.phaseroots",
1459 self.svfs.tryread("phaseroots"))
1460 self.svfs.tryread("phaseroots"))
1460
1461
1461 def recover(self):
1462 def recover(self):
1462 with self.lock():
1463 with self.lock():
1463 if self.svfs.exists("journal"):
1464 if self.svfs.exists("journal"):
1464 self.ui.status(_("rolling back interrupted transaction\n"))
1465 self.ui.status(_("rolling back interrupted transaction\n"))
1465 vfsmap = {'': self.svfs,
1466 vfsmap = {'': self.svfs,
1466 'plain': self.vfs,}
1467 'plain': self.vfs,}
1467 transaction.rollback(self.svfs, vfsmap, "journal",
1468 transaction.rollback(self.svfs, vfsmap, "journal",
1468 self.ui.warn,
1469 self.ui.warn,
1469 checkambigfiles=_cachedfiles)
1470 checkambigfiles=_cachedfiles)
1470 self.invalidate()
1471 self.invalidate()
1471 return True
1472 return True
1472 else:
1473 else:
1473 self.ui.warn(_("no interrupted transaction available\n"))
1474 self.ui.warn(_("no interrupted transaction available\n"))
1474 return False
1475 return False
1475
1476
1476 def rollback(self, dryrun=False, force=False):
1477 def rollback(self, dryrun=False, force=False):
1477 wlock = lock = dsguard = None
1478 wlock = lock = dsguard = None
1478 try:
1479 try:
1479 wlock = self.wlock()
1480 wlock = self.wlock()
1480 lock = self.lock()
1481 lock = self.lock()
1481 if self.svfs.exists("undo"):
1482 if self.svfs.exists("undo"):
1482 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1483 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1483
1484
1484 return self._rollback(dryrun, force, dsguard)
1485 return self._rollback(dryrun, force, dsguard)
1485 else:
1486 else:
1486 self.ui.warn(_("no rollback information available\n"))
1487 self.ui.warn(_("no rollback information available\n"))
1487 return 1
1488 return 1
1488 finally:
1489 finally:
1489 release(dsguard, lock, wlock)
1490 release(dsguard, lock, wlock)
1490
1491
1491 @unfilteredmethod # Until we get smarter cache management
1492 @unfilteredmethod # Until we get smarter cache management
1492 def _rollback(self, dryrun, force, dsguard):
1493 def _rollback(self, dryrun, force, dsguard):
1493 ui = self.ui
1494 ui = self.ui
1494 try:
1495 try:
1495 args = self.vfs.read('undo.desc').splitlines()
1496 args = self.vfs.read('undo.desc').splitlines()
1496 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1497 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1497 if len(args) >= 3:
1498 if len(args) >= 3:
1498 detail = args[2]
1499 detail = args[2]
1499 oldtip = oldlen - 1
1500 oldtip = oldlen - 1
1500
1501
1501 if detail and ui.verbose:
1502 if detail and ui.verbose:
1502 msg = (_('repository tip rolled back to revision %d'
1503 msg = (_('repository tip rolled back to revision %d'
1503 ' (undo %s: %s)\n')
1504 ' (undo %s: %s)\n')
1504 % (oldtip, desc, detail))
1505 % (oldtip, desc, detail))
1505 else:
1506 else:
1506 msg = (_('repository tip rolled back to revision %d'
1507 msg = (_('repository tip rolled back to revision %d'
1507 ' (undo %s)\n')
1508 ' (undo %s)\n')
1508 % (oldtip, desc))
1509 % (oldtip, desc))
1509 except IOError:
1510 except IOError:
1510 msg = _('rolling back unknown transaction\n')
1511 msg = _('rolling back unknown transaction\n')
1511 desc = None
1512 desc = None
1512
1513
1513 if not force and self['.'] != self['tip'] and desc == 'commit':
1514 if not force and self['.'] != self['tip'] and desc == 'commit':
1514 raise error.Abort(
1515 raise error.Abort(
1515 _('rollback of last commit while not checked out '
1516 _('rollback of last commit while not checked out '
1516 'may lose data'), hint=_('use -f to force'))
1517 'may lose data'), hint=_('use -f to force'))
1517
1518
1518 ui.status(msg)
1519 ui.status(msg)
1519 if dryrun:
1520 if dryrun:
1520 return 0
1521 return 0
1521
1522
1522 parents = self.dirstate.parents()
1523 parents = self.dirstate.parents()
1523 self.destroying()
1524 self.destroying()
1524 vfsmap = {'plain': self.vfs, '': self.svfs}
1525 vfsmap = {'plain': self.vfs, '': self.svfs}
1525 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1526 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1526 checkambigfiles=_cachedfiles)
1527 checkambigfiles=_cachedfiles)
1527 if self.vfs.exists('undo.bookmarks'):
1528 if self.vfs.exists('undo.bookmarks'):
1528 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1529 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1529 if self.svfs.exists('undo.phaseroots'):
1530 if self.svfs.exists('undo.phaseroots'):
1530 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1531 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1531 self.invalidate()
1532 self.invalidate()
1532
1533
1533 parentgone = (parents[0] not in self.changelog.nodemap or
1534 parentgone = (parents[0] not in self.changelog.nodemap or
1534 parents[1] not in self.changelog.nodemap)
1535 parents[1] not in self.changelog.nodemap)
1535 if parentgone:
1536 if parentgone:
1536 # prevent dirstateguard from overwriting already restored one
1537 # prevent dirstateguard from overwriting already restored one
1537 dsguard.close()
1538 dsguard.close()
1538
1539
1539 self.dirstate.restorebackup(None, 'undo.dirstate')
1540 self.dirstate.restorebackup(None, 'undo.dirstate')
1540 try:
1541 try:
1541 branch = self.vfs.read('undo.branch')
1542 branch = self.vfs.read('undo.branch')
1542 self.dirstate.setbranch(encoding.tolocal(branch))
1543 self.dirstate.setbranch(encoding.tolocal(branch))
1543 except IOError:
1544 except IOError:
1544 ui.warn(_('named branch could not be reset: '
1545 ui.warn(_('named branch could not be reset: '
1545 'current branch is still \'%s\'\n')
1546 'current branch is still \'%s\'\n')
1546 % self.dirstate.branch())
1547 % self.dirstate.branch())
1547
1548
1548 parents = tuple([p.rev() for p in self[None].parents()])
1549 parents = tuple([p.rev() for p in self[None].parents()])
1549 if len(parents) > 1:
1550 if len(parents) > 1:
1550 ui.status(_('working directory now based on '
1551 ui.status(_('working directory now based on '
1551 'revisions %d and %d\n') % parents)
1552 'revisions %d and %d\n') % parents)
1552 else:
1553 else:
1553 ui.status(_('working directory now based on '
1554 ui.status(_('working directory now based on '
1554 'revision %d\n') % parents)
1555 'revision %d\n') % parents)
1555 mergemod.mergestate.clean(self, self['.'].node())
1556 mergemod.mergestate.clean(self, self['.'].node())
1556
1557
1557 # TODO: if we know which new heads may result from this rollback, pass
1558 # TODO: if we know which new heads may result from this rollback, pass
1558 # them to destroy(), which will prevent the branchhead cache from being
1559 # them to destroy(), which will prevent the branchhead cache from being
1559 # invalidated.
1560 # invalidated.
1560 self.destroyed()
1561 self.destroyed()
1561 return 0
1562 return 0
1562
1563
1563 def _buildcacheupdater(self, newtransaction):
1564 def _buildcacheupdater(self, newtransaction):
1564 """called during transaction to build the callback updating cache
1565 """called during transaction to build the callback updating cache
1565
1566
1566 Lives on the repository to help extension who might want to augment
1567 Lives on the repository to help extension who might want to augment
1567 this logic. For this purpose, the created transaction is passed to the
1568 this logic. For this purpose, the created transaction is passed to the
1568 method.
1569 method.
1569 """
1570 """
1570 # we must avoid cyclic reference between repo and transaction.
1571 # we must avoid cyclic reference between repo and transaction.
1571 reporef = weakref.ref(self)
1572 reporef = weakref.ref(self)
1572 def updater(tr):
1573 def updater(tr):
1573 repo = reporef()
1574 repo = reporef()
1574 repo.updatecaches(tr)
1575 repo.updatecaches(tr)
1575 return updater
1576 return updater
1576
1577
@unfilteredmethod
def updatecaches(self, tr=None, full=False):
    """warm appropriate caches

    If this function is called after a transaction closed. The transaction
    will be available in the 'tr' argument. This can be used to selectively
    update caches relevant to the changes in that transaction.

    If 'full' is set, make sure all caches the function knows about have
    up-to-date data. Even the ones usually loaded more lazily.
    """
    if tr is not None and tr.hookargs.get('source') == 'strip':
        # During strip, many caches are invalid but
        # later call to `destroyed` will refresh them.
        return

    # no transaction info (explicit warming) or the transaction actually
    # added revisions: refresh the branchmap
    if tr is None or tr.changes['revs']:
        # updating the unfiltered branchmap should refresh all the others,
        self.ui.debug('updating the branch cache\n')
        branchmap.updatecache(self.filtered('served'))

    if full:
        # warm the rev -> branch-name cache for every revision;
        # branchinfo() fills the cache entry as a side effect, then the
        # whole cache is written back to disk
        rbc = self.revbranchcache()
        for r in self.changelog:
            rbc.branchinfo(r)
        rbc.write()
1603
1604
def invalidatecaches(self):
    """Drop in-memory derived caches (tags, branch caches, volatile sets).

    The next reader of any of these caches will recompute or reload it.
    """
    # remove through __dict__ because delattr can't be used on the
    # repoview proxy; pop() is a no-op when the cache was never built
    self.__dict__.pop('_tagscache', None)

    self.unfiltered()._branchcaches.clear()
    self.invalidatevolatilesets()
    self._sparsesignaturecache.clear()
1613
1614
def invalidatevolatilesets(self):
    # Drop caches derived from repository content that may have changed:
    # the per-filter-level revision caches and the obsolescence caches.
    # They are lazily rebuilt on next access.
    self.filteredrevcache.clear()
    obsolete.clearobscaches(self)
1617
1618
def invalidatedirstate(self):
    '''Invalidates the dirstate, causing the next call to dirstate
    to check if it was modified since the last time it was read,
    rereading it if it has.

    This is different to dirstate.invalidate() that it doesn't always
    rereads the dirstate. Use dirstate.invalidate() if you want to
    explicitly read the dirstate again (i.e. restoring it to a previous
    known good state).'''
    if hasunfilteredcache(self, 'dirstate'):
        # first drop every property the dirstate object itself has
        # cached from disk ...
        for k in self.dirstate._filecache:
            try:
                delattr(self.dirstate, k)
            except AttributeError:
                # property was never loaded; nothing to drop
                pass
        # ... then drop the cached dirstate object on the unfiltered
        # repo (where all file caches live), so the next access
        # re-creates it and re-checks the on-disk file
        delattr(self.unfiltered(), 'dirstate')
1634
1635
def invalidate(self, clearfilecache=False):
    '''Invalidates both store and non-store parts other than dirstate

    If a transaction is running, invalidation of store is omitted,
    because discarding in-memory changes might cause inconsistency
    (e.g. incomplete fncache causes unintentional failure, but
    redundant one doesn't).
    '''
    unfiltered = self.unfiltered() # all file caches are stored unfiltered
    # iterate over a copy of the keys: entries may be deleted below
    for k in list(self._filecache.keys()):
        # dirstate is invalidated separately in invalidatedirstate()
        if k == 'dirstate':
            continue
        if (k == 'changelog' and
            self.currenttransaction() and
            self.changelog._delayed):
            # The changelog object may store unwritten revisions. We don't
            # want to lose them.
            # TODO: Solve the problem instead of working around it.
            continue

        if clearfilecache:
            del self._filecache[k]
        try:
            # drop the cached property so the next access rereads it
            delattr(unfiltered, k)
        except AttributeError:
            # property was never loaded; nothing cached to drop
            pass
    self.invalidatecaches()
    if not self.currenttransaction():
        # TODO: Changing contents of store outside transaction
        # causes inconsistency. We should make in-memory store
        # changes detectable, and abort if changed.
        self.store.invalidatecaches()
1668
1669
def invalidateall(self):
    '''Fully invalidates both store and non-store parts, causing the
    subsequent operation to reread any outside changes.'''
    # extension should hook this to invalidate its caches
    self.invalidate()
    self.invalidatedirstate()
1675
1676
@unfilteredmethod
def _refreshfilecachestats(self, tr):
    """Reload stats of cached files so that they are flagged as valid"""
    for name, entry in self._filecache.items():
        name = pycompat.sysstr(name)
        # dirstate is managed separately; entries absent from __dict__
        # were never loaded, so there is nothing to refresh
        if name != r'dirstate' and name in self.__dict__:
            entry.refresh()
1684
1685
def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
          inheritchecker=None, parentenvvar=None):
    """Acquire the named lock file on *vfs* and return the lock object.

    When *wait* is true, blocking behavior and warning delay are taken
    from the 'ui.timeout' / 'ui.timeout.warn' configuration; otherwise
    acquisition fails immediately if the lock is contended.
    """
    # the contents of parentenvvar are consulted by the underlying lock
    # to decide whether it can be inherited from the parent process
    if parentenvvar is None:
        parentlock = None
    else:
        parentlock = encoding.environ.get(parentenvvar)

    if wait:
        timeout = self.ui.configint("ui", "timeout")
        warntimeout = self.ui.configint("ui", "timeout.warn")
    else:
        timeout = warntimeout = 0

    return lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                           releasefn=releasefn,
                           acquirefn=acquirefn, desc=desc,
                           inheritchecker=inheritchecker,
                           parentlock=parentlock)
1705
1706
1706 def _afterlock(self, callback):
1707 def _afterlock(self, callback):
1707 """add a callback to be run when the repository is fully unlocked
1708 """add a callback to be run when the repository is fully unlocked
1708
1709
1709 The callback will be executed when the outermost lock is released
1710 The callback will be executed when the outermost lock is released
1710 (with wlock being higher level than 'lock')."""
1711 (with wlock being higher level than 'lock')."""
1711 for ref in (self._wlockref, self._lockref):
1712 for ref in (self._wlockref, self._lockref):
1712 l = ref and ref()
1713 l = ref and ref()
1713 if l and l.held:
1714 if l and l.held:
1714 l.postrelease.append(callback)
1715 l.postrelease.append(callback)
1715 break
1716 break
1716 else: # no lock have been found.
1717 else: # no lock have been found.
1717 callback()
1718 callback()
1718
1719
def lock(self, wait=True):
    '''Lock the repository store (.hg/store) and return a weak reference
    to the lock. Use this before modifying the store (e.g. committing or
    stripping). If you are opening a transaction, get a lock as well.)

    If both 'lock' and 'wlock' must be acquired, ensure you always acquires
    'wlock' first to avoid a dead-lock hazard.'''
    # reuse the lock if this process already holds it; lock() bumps the
    # reference count so release is properly nested
    l = self._currentlock(self._lockref)
    if l is not None:
        l.lock()
        return l

    l = self._lock(self.svfs, "lock", wait, None,
                   self.invalidate, _('repository %s') % self.origroot)
    # keep only a weak reference: the lock is released once the caller
    # drops the last strong reference
    self._lockref = weakref.ref(l)
    return l
1735
1736
1736 def _wlockchecktransaction(self):
1737 def _wlockchecktransaction(self):
1737 if self.currenttransaction() is not None:
1738 if self.currenttransaction() is not None:
1738 raise error.LockInheritanceContractViolation(
1739 raise error.LockInheritanceContractViolation(
1739 'wlock cannot be inherited in the middle of a transaction')
1740 'wlock cannot be inherited in the middle of a transaction')
1740
1741
def wlock(self, wait=True):
    '''Lock the non-store parts of the repository (everything under
    .hg except .hg/store) and return a weak reference to the lock.

    Use this before modifying files in .hg.

    If both 'lock' and 'wlock' must be acquired, ensure you always acquires
    'wlock' first to avoid a dead-lock hazard.'''
    # reuse the wlock if this process already holds it; lock() bumps the
    # reference count so release is properly nested
    l = self._wlockref and self._wlockref()
    if l is not None and l.held:
        l.lock()
        return l

    # We do not need to check for non-waiting lock acquisition. Such
    # acquisition would not cause dead-lock as they would just fail.
    if wait and (self.ui.configbool('devel', 'all-warnings')
                 or self.ui.configbool('devel', 'check-locks')):
        if self._currentlock(self._lockref) is not None:
            self.ui.develwarn('"wlock" acquired after "lock"')

    def unlock():
        # releasefn: flush (or discard) dirstate changes when the lock
        # is dropped, then re-validate the dirstate filecache entry
        if self.dirstate.pendingparentchange():
            self.dirstate.invalidate()
        else:
            self.dirstate.write(None)

        self._filecache['dirstate'].refresh()

    l = self._lock(self.vfs, "wlock", wait, unlock,
                   self.invalidatedirstate, _('working directory of %s') %
                   self.origroot,
                   inheritchecker=self._wlockchecktransaction,
                   parentenvvar='HG_WLOCK_LOCKER')
    # keep only a weak reference: the lock is released once the caller
    # drops the last strong reference
    self._wlockref = weakref.ref(l)
    return l
1776
1777
1777 def _currentlock(self, lockref):
1778 def _currentlock(self, lockref):
1778 """Returns the lock if it's held, or None if it's not."""
1779 """Returns the lock if it's held, or None if it's not."""
1779 if lockref is None:
1780 if lockref is None:
1780 return None
1781 return None
1781 l = lockref()
1782 l = lockref()
1782 if l is None or not l.held:
1783 if l is None or not l.held:
1783 return None
1784 return None
1784 return l
1785 return l
1785
1786
def currentwlock(self):
    """Returns the wlock if it's held, or None if it's not."""
    wref = self._wlockref
    return self._currentlock(wref)
1789
1790
def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
    """
    commit an individual file as part of a larger transaction

    fctx: the file context to commit
    manifest1, manifest2: manifests of the two commit parents
    linkrev: changelog revision the new filelog entry will link to
    tr: the active transaction
    changelist: list of changed files; appended to as a side effect

    Returns the filelog node to record in the manifest for this file.
    """

    fname = fctx.path()
    fparent1 = manifest1.get(fname, nullid)
    fparent2 = manifest2.get(fname, nullid)
    if isinstance(fctx, context.filectx):
        # the file context already points at a stored filelog revision;
        # reuse it when it matches one of the parents
        node = fctx.filenode()
        if node in [fparent1, fparent2]:
            self.ui.debug('reusing %s filelog entry\n' % fname)
            if manifest1.flags(fname) != fctx.flags():
                changelist.append(fname)
            return node

    flog = self.file(fname)
    meta = {}
    copy = fctx.renamed()
    if copy and copy[0] != fname:
        # Mark the new revision of this file as a copy of another
        # file.  This copy data will effectively act as a parent
        # of this new revision.  If this is a merge, the first
        # parent will be the nullid (meaning "look up the copy data")
        # and the second one will be the other parent.  For example:
        #
        # 0 --- 1 --- 3   rev1 changes file foo
        #   \       /     rev2 renames foo to bar and changes it
        #    \- 2 -/      rev3 should have bar with all changes and
        #                      should record that bar descends from
        #                      bar in rev2 and foo in rev1
        #
        # this allows this merge to succeed:
        #
        # 0 --- 1 --- 3   rev4 reverts the content change from rev2
        #   \       /     merging rev3 and rev4 should use bar@rev2
        #    \- 2 --- 4        as the merge base
        #

        cfname = copy[0]
        crev = manifest1.get(cfname)
        newfparent = fparent2

        if manifest2: # branch merge
            if fparent2 == nullid or crev is None: # copied on remote side
                if cfname in manifest2:
                    crev = manifest2[cfname]
                    newfparent = fparent1

        # Here, we used to search backwards through history to try to find
        # where the file copy came from if the source of a copy was not in
        # the parent directory. However, this doesn't actually make sense to
        # do (what does a copy from something not in your working copy even
        # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
        # the user that copy information was dropped, so if they didn't
        # expect this outcome it can be fixed, but this is the correct
        # behavior in this circumstance.

        if crev:
            self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            fparent1, fparent2 = nullid, newfparent
        else:
            self.ui.warn(_("warning: can't find ancestor for '%s' "
                           "copied from '%s'!\n") % (fname, cfname))

    elif fparent1 == nullid:
        fparent1, fparent2 = fparent2, nullid
    elif fparent2 != nullid:
        # is one parent an ancestor of the other?
        fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
        if fparent1 in fparentancestors:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 in fparentancestors:
            fparent2 = nullid

    # is the file changed?
    text = fctx.data()
    if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
        changelist.append(fname)
        return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
    # are just the flags changed during merge?
    elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
        changelist.append(fname)

    return fparent1
1877
1878
def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
    """check for commit arguments that aren't committable

    Calls fail(filename, message) for every explicitly named file or
    pattern that cannot be part of the commit (missing, untracked, or a
    directory with no matching changes).
    """
    # only explicit files/prefix patterns are worth diagnosing; broad
    # patterns are allowed to match nothing
    if match.isexact() or match.prefix():
        matched = set(status.modified + status.added + status.removed)

        for f in match.files():
            f = self.dirstate.normalize(f)
            if f == '.' or f in matched or f in wctx.substate:
                continue
            if f in status.deleted:
                fail(f, _('file not found!'))
            if f in vdirs: # visited directory
                d = f + '/'
                # accept the directory if at least one matched file
                # lives under it (for/else: fail when none does)
                for mf in matched:
                    if mf.startswith(d):
                        break
                else:
                    fail(f, _("no match under directory!"))
            elif f not in self.dirstate:
                fail(f, _("file not tracked!"))
1898
1899
@unfilteredmethod
def commit(self, text="", user=None, date=None, match=None, force=False,
           editor=False, extra=None):
    """Add a new revision to current repository.

    Revision information is gathered from the working directory,
    match can be used to filter the committed files. If editor is
    supplied, it is called to get a commit message.
    """
    if extra is None:
        extra = {}

    def fail(f, msg):
        raise error.Abort('%s: %s' % (f, msg))

    if not match:
        match = matchmod.always(self.root, '')

    if not force:
        # record every directory the matcher visits so explicit
        # directory arguments can be validated below
        vdirs = []
        match.explicitdir = vdirs.append
        match.bad = fail

    wlock = lock = tr = None
    try:
        wlock = self.wlock()
        lock = self.lock() # for recent changelog (see issue4368)

        wctx = self[None]
        merge = len(wctx.parents()) > 1

        if not force and merge and not match.always():
            raise error.Abort(_('cannot partially commit a merge '
                               '(do not specify files or patterns)'))

        status = self.status(match=match, clean=force)
        if force:
            status.modified.extend(status.clean) # mq may commit clean files

        # check subrepos
        subs, commitsubs, newstate = subrepoutil.precommit(
            self.ui, wctx, status, match, force=force)

        # make sure all explicit patterns are matched
        if not force:
            self.checkcommitpatterns(wctx, vdirs, match, status, fail)

        cctx = context.workingcommitctx(self, status,
                                        text, user, date, extra)

        # internal config: ui.allowemptycommit
        allowemptycommit = (wctx.branch() != wctx.p1().branch()
                            or extra.get('close') or merge or cctx.files()
                            or self.ui.configbool('ui', 'allowemptycommit'))
        if not allowemptycommit:
            return None

        if merge and cctx.deleted():
            raise error.Abort(_("cannot commit merge with missing files"))

        ms = mergemod.mergestate.read(self)
        mergeutil.checkunresolved(ms)

        if editor:
            cctx._text = editor(self, cctx, subs)
        # 'edited' is evaluated unconditionally so the except clause
        # below can always reference it
        edited = (text != cctx._text)

        # Save commit message in case this transaction gets rolled back
        # (e.g. by a pretxncommit hook).  Leave the content alone on
        # the assumption that the user will use the same editor again.
        msgfn = self.savecommitmessage(cctx._text)

        # commit subs and write new state
        if subs:
            for s in sorted(commitsubs):
                sub = wctx.sub(s)
                self.ui.status(_('committing subrepository %s\n') %
                               subrepoutil.subrelpath(sub))
                sr = sub.commit(cctx._text, user, date)
                newstate[s] = (newstate[s][0], sr)
            subrepoutil.writestate(self, newstate)

        p1, p2 = self.dirstate.parents()
        hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
        try:
            self.hook("precommit", throw=True, parent1=hookp1,
                      parent2=hookp2)
            tr = self.transaction('commit')
            ret = self.commitctx(cctx, True)
        except: # re-raises
            if edited:
                # point the user at the saved message before the
                # exception propagates
                self.ui.write(
                    _('note: commit message saved in %s\n') % msgfn)
            raise
        # update bookmarks, dirstate and mergestate
        bookmarks.update(self, [p1, p2], ret)
        cctx.markcommitted(ret)
        ms.reset()
        tr.close()

    finally:
        lockmod.release(tr, lock, wlock)

    def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
        # hack for command that use a temporary commit (eg: histedit)
        # temporary commit got stripped before hook release
        if self.changelog.hasnode(ret):
            self.hook("commit", node=node, parent1=parent1,
                      parent2=parent2)
    # run the 'commit' hook only once all locks are released
    self._afterlock(commithook)
    return ret
2010
2011
2011 @unfilteredmethod
2012 @unfilteredmethod
2012 def commitctx(self, ctx, error=False):
2013 def commitctx(self, ctx, error=False):
2013 """Add a new revision to current repository.
2014 """Add a new revision to current repository.
2014 Revision information is passed via the context argument.
2015 Revision information is passed via the context argument.
2015 """
2016 """
2016
2017
2017 tr = None
2018 tr = None
2018 p1, p2 = ctx.p1(), ctx.p2()
2019 p1, p2 = ctx.p1(), ctx.p2()
2019 user = ctx.user()
2020 user = ctx.user()
2020
2021
2021 lock = self.lock()
2022 lock = self.lock()
2022 try:
2023 try:
2023 tr = self.transaction("commit")
2024 tr = self.transaction("commit")
2024 trp = weakref.proxy(tr)
2025 trp = weakref.proxy(tr)
2025
2026
2026 if ctx.manifestnode():
2027 if ctx.manifestnode():
2027 # reuse an existing manifest revision
2028 # reuse an existing manifest revision
2028 mn = ctx.manifestnode()
2029 mn = ctx.manifestnode()
2029 files = ctx.files()
2030 files = ctx.files()
2030 elif ctx.files():
2031 elif ctx.files():
2031 m1ctx = p1.manifestctx()
2032 m1ctx = p1.manifestctx()
2032 m2ctx = p2.manifestctx()
2033 m2ctx = p2.manifestctx()
2033 mctx = m1ctx.copy()
2034 mctx = m1ctx.copy()
2034
2035
2035 m = mctx.read()
2036 m = mctx.read()
2036 m1 = m1ctx.read()
2037 m1 = m1ctx.read()
2037 m2 = m2ctx.read()
2038 m2 = m2ctx.read()
2038
2039
2039 # check in files
2040 # check in files
2040 added = []
2041 added = []
2041 changed = []
2042 changed = []
2042 removed = list(ctx.removed())
2043 removed = list(ctx.removed())
2043 linkrev = len(self)
2044 linkrev = len(self)
2044 self.ui.note(_("committing files:\n"))
2045 self.ui.note(_("committing files:\n"))
2045 for f in sorted(ctx.modified() + ctx.added()):
2046 for f in sorted(ctx.modified() + ctx.added()):
2046 self.ui.note(f + "\n")
2047 self.ui.note(f + "\n")
2047 try:
2048 try:
2048 fctx = ctx[f]
2049 fctx = ctx[f]
2049 if fctx is None:
2050 if fctx is None:
2050 removed.append(f)
2051 removed.append(f)
2051 else:
2052 else:
2052 added.append(f)
2053 added.append(f)
2053 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2054 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2054 trp, changed)
2055 trp, changed)
2055 m.setflag(f, fctx.flags())
2056 m.setflag(f, fctx.flags())
2056 except OSError as inst:
2057 except OSError as inst:
2057 self.ui.warn(_("trouble committing %s!\n") % f)
2058 self.ui.warn(_("trouble committing %s!\n") % f)
2058 raise
2059 raise
2059 except IOError as inst:
2060 except IOError as inst:
2060 errcode = getattr(inst, 'errno', errno.ENOENT)
2061 errcode = getattr(inst, 'errno', errno.ENOENT)
2061 if error or errcode and errcode != errno.ENOENT:
2062 if error or errcode and errcode != errno.ENOENT:
2062 self.ui.warn(_("trouble committing %s!\n") % f)
2063 self.ui.warn(_("trouble committing %s!\n") % f)
2063 raise
2064 raise
2064
2065
2065 # update manifest
2066 # update manifest
2066 self.ui.note(_("committing manifest\n"))
2067 self.ui.note(_("committing manifest\n"))
2067 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2068 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2068 drop = [f for f in removed if f in m]
2069 drop = [f for f in removed if f in m]
2069 for f in drop:
2070 for f in drop:
2070 del m[f]
2071 del m[f]
2071 mn = mctx.write(trp, linkrev,
2072 mn = mctx.write(trp, linkrev,
2072 p1.manifestnode(), p2.manifestnode(),
2073 p1.manifestnode(), p2.manifestnode(),
2073 added, drop)
2074 added, drop)
2074 files = changed + removed
2075 files = changed + removed
2075 else:
2076 else:
2076 mn = p1.manifestnode()
2077 mn = p1.manifestnode()
2077 files = []
2078 files = []
2078
2079
2079 # update changelog
2080 # update changelog
2080 self.ui.note(_("committing changelog\n"))
2081 self.ui.note(_("committing changelog\n"))
2081 self.changelog.delayupdate(tr)
2082 self.changelog.delayupdate(tr)
2082 n = self.changelog.add(mn, files, ctx.description(),
2083 n = self.changelog.add(mn, files, ctx.description(),
2083 trp, p1.node(), p2.node(),
2084 trp, p1.node(), p2.node(),
2084 user, ctx.date(), ctx.extra().copy())
2085 user, ctx.date(), ctx.extra().copy())
2085 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2086 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2086 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2087 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2087 parent2=xp2)
2088 parent2=xp2)
2088 # set the new commit is proper phase
2089 # set the new commit is proper phase
2089 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2090 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2090 if targetphase:
2091 if targetphase:
2091 # retract boundary do not alter parent changeset.
2092 # retract boundary do not alter parent changeset.
2092 # if a parent have higher the resulting phase will
2093 # if a parent have higher the resulting phase will
2093 # be compliant anyway
2094 # be compliant anyway
2094 #
2095 #
2095 # if minimal phase was 0 we don't need to retract anything
2096 # if minimal phase was 0 we don't need to retract anything
2096 phases.registernew(self, tr, targetphase, [n])
2097 phases.registernew(self, tr, targetphase, [n])
2097 tr.close()
2098 tr.close()
2098 return n
2099 return n
2099 finally:
2100 finally:
2100 if tr:
2101 if tr:
2101 tr.release()
2102 tr.release()
2102 lock.release()
2103 lock.release()
2103
2104
2104 @unfilteredmethod
2105 @unfilteredmethod
2105 def destroying(self):
2106 def destroying(self):
2106 '''Inform the repository that nodes are about to be destroyed.
2107 '''Inform the repository that nodes are about to be destroyed.
2107 Intended for use by strip and rollback, so there's a common
2108 Intended for use by strip and rollback, so there's a common
2108 place for anything that has to be done before destroying history.
2109 place for anything that has to be done before destroying history.
2109
2110
2110 This is mostly useful for saving state that is in memory and waiting
2111 This is mostly useful for saving state that is in memory and waiting
2111 to be flushed when the current lock is released. Because a call to
2112 to be flushed when the current lock is released. Because a call to
2112 destroyed is imminent, the repo will be invalidated causing those
2113 destroyed is imminent, the repo will be invalidated causing those
2113 changes to stay in memory (waiting for the next unlock), or vanish
2114 changes to stay in memory (waiting for the next unlock), or vanish
2114 completely.
2115 completely.
2115 '''
2116 '''
2116 # When using the same lock to commit and strip, the phasecache is left
2117 # When using the same lock to commit and strip, the phasecache is left
2117 # dirty after committing. Then when we strip, the repo is invalidated,
2118 # dirty after committing. Then when we strip, the repo is invalidated,
2118 # causing those changes to disappear.
2119 # causing those changes to disappear.
2119 if '_phasecache' in vars(self):
2120 if '_phasecache' in vars(self):
2120 self._phasecache.write()
2121 self._phasecache.write()
2121
2122
2122 @unfilteredmethod
2123 @unfilteredmethod
2123 def destroyed(self):
2124 def destroyed(self):
2124 '''Inform the repository that nodes have been destroyed.
2125 '''Inform the repository that nodes have been destroyed.
2125 Intended for use by strip and rollback, so there's a common
2126 Intended for use by strip and rollback, so there's a common
2126 place for anything that has to be done after destroying history.
2127 place for anything that has to be done after destroying history.
2127 '''
2128 '''
2128 # When one tries to:
2129 # When one tries to:
2129 # 1) destroy nodes thus calling this method (e.g. strip)
2130 # 1) destroy nodes thus calling this method (e.g. strip)
2130 # 2) use phasecache somewhere (e.g. commit)
2131 # 2) use phasecache somewhere (e.g. commit)
2131 #
2132 #
2132 # then 2) will fail because the phasecache contains nodes that were
2133 # then 2) will fail because the phasecache contains nodes that were
2133 # removed. We can either remove phasecache from the filecache,
2134 # removed. We can either remove phasecache from the filecache,
2134 # causing it to reload next time it is accessed, or simply filter
2135 # causing it to reload next time it is accessed, or simply filter
2135 # the removed nodes now and write the updated cache.
2136 # the removed nodes now and write the updated cache.
2136 self._phasecache.filterunknown(self)
2137 self._phasecache.filterunknown(self)
2137 self._phasecache.write()
2138 self._phasecache.write()
2138
2139
2139 # refresh all repository caches
2140 # refresh all repository caches
2140 self.updatecaches()
2141 self.updatecaches()
2141
2142
2142 # Ensure the persistent tag cache is updated. Doing it now
2143 # Ensure the persistent tag cache is updated. Doing it now
2143 # means that the tag cache only has to worry about destroyed
2144 # means that the tag cache only has to worry about destroyed
2144 # heads immediately after a strip/rollback. That in turn
2145 # heads immediately after a strip/rollback. That in turn
2145 # guarantees that "cachetip == currenttip" (comparing both rev
2146 # guarantees that "cachetip == currenttip" (comparing both rev
2146 # and node) always means no nodes have been added or destroyed.
2147 # and node) always means no nodes have been added or destroyed.
2147
2148
2148 # XXX this is suboptimal when qrefresh'ing: we strip the current
2149 # XXX this is suboptimal when qrefresh'ing: we strip the current
2149 # head, refresh the tag cache, then immediately add a new head.
2150 # head, refresh the tag cache, then immediately add a new head.
2150 # But I think doing it this way is necessary for the "instant
2151 # But I think doing it this way is necessary for the "instant
2151 # tag cache retrieval" case to work.
2152 # tag cache retrieval" case to work.
2152 self.invalidate()
2153 self.invalidate()
2153
2154
2154 def status(self, node1='.', node2=None, match=None,
2155 def status(self, node1='.', node2=None, match=None,
2155 ignored=False, clean=False, unknown=False,
2156 ignored=False, clean=False, unknown=False,
2156 listsubrepos=False):
2157 listsubrepos=False):
2157 '''a convenience method that calls node1.status(node2)'''
2158 '''a convenience method that calls node1.status(node2)'''
2158 return self[node1].status(node2, match, ignored, clean, unknown,
2159 return self[node1].status(node2, match, ignored, clean, unknown,
2159 listsubrepos)
2160 listsubrepos)
2160
2161
2161 def addpostdsstatus(self, ps):
2162 def addpostdsstatus(self, ps):
2162 """Add a callback to run within the wlock, at the point at which status
2163 """Add a callback to run within the wlock, at the point at which status
2163 fixups happen.
2164 fixups happen.
2164
2165
2165 On status completion, callback(wctx, status) will be called with the
2166 On status completion, callback(wctx, status) will be called with the
2166 wlock held, unless the dirstate has changed from underneath or the wlock
2167 wlock held, unless the dirstate has changed from underneath or the wlock
2167 couldn't be grabbed.
2168 couldn't be grabbed.
2168
2169
2169 Callbacks should not capture and use a cached copy of the dirstate --
2170 Callbacks should not capture and use a cached copy of the dirstate --
2170 it might change in the meanwhile. Instead, they should access the
2171 it might change in the meanwhile. Instead, they should access the
2171 dirstate via wctx.repo().dirstate.
2172 dirstate via wctx.repo().dirstate.
2172
2173
2173 This list is emptied out after each status run -- extensions should
2174 This list is emptied out after each status run -- extensions should
2174 make sure it adds to this list each time dirstate.status is called.
2175 make sure it adds to this list each time dirstate.status is called.
2175 Extensions should also make sure they don't call this for statuses
2176 Extensions should also make sure they don't call this for statuses
2176 that don't involve the dirstate.
2177 that don't involve the dirstate.
2177 """
2178 """
2178
2179
2179 # The list is located here for uniqueness reasons -- it is actually
2180 # The list is located here for uniqueness reasons -- it is actually
2180 # managed by the workingctx, but that isn't unique per-repo.
2181 # managed by the workingctx, but that isn't unique per-repo.
2181 self._postdsstatus.append(ps)
2182 self._postdsstatus.append(ps)
2182
2183
2183 def postdsstatus(self):
2184 def postdsstatus(self):
2184 """Used by workingctx to get the list of post-dirstate-status hooks."""
2185 """Used by workingctx to get the list of post-dirstate-status hooks."""
2185 return self._postdsstatus
2186 return self._postdsstatus
2186
2187
2187 def clearpostdsstatus(self):
2188 def clearpostdsstatus(self):
2188 """Used by workingctx to clear post-dirstate-status hooks."""
2189 """Used by workingctx to clear post-dirstate-status hooks."""
2189 del self._postdsstatus[:]
2190 del self._postdsstatus[:]
2190
2191
2191 def heads(self, start=None):
2192 def heads(self, start=None):
2192 if start is None:
2193 if start is None:
2193 cl = self.changelog
2194 cl = self.changelog
2194 headrevs = reversed(cl.headrevs())
2195 headrevs = reversed(cl.headrevs())
2195 return [cl.node(rev) for rev in headrevs]
2196 return [cl.node(rev) for rev in headrevs]
2196
2197
2197 heads = self.changelog.heads(start)
2198 heads = self.changelog.heads(start)
2198 # sort the output in rev descending order
2199 # sort the output in rev descending order
2199 return sorted(heads, key=self.changelog.rev, reverse=True)
2200 return sorted(heads, key=self.changelog.rev, reverse=True)
2200
2201
2201 def branchheads(self, branch=None, start=None, closed=False):
2202 def branchheads(self, branch=None, start=None, closed=False):
2202 '''return a (possibly filtered) list of heads for the given branch
2203 '''return a (possibly filtered) list of heads for the given branch
2203
2204
2204 Heads are returned in topological order, from newest to oldest.
2205 Heads are returned in topological order, from newest to oldest.
2205 If branch is None, use the dirstate branch.
2206 If branch is None, use the dirstate branch.
2206 If start is not None, return only heads reachable from start.
2207 If start is not None, return only heads reachable from start.
2207 If closed is True, return heads that are marked as closed as well.
2208 If closed is True, return heads that are marked as closed as well.
2208 '''
2209 '''
2209 if branch is None:
2210 if branch is None:
2210 branch = self[None].branch()
2211 branch = self[None].branch()
2211 branches = self.branchmap()
2212 branches = self.branchmap()
2212 if branch not in branches:
2213 if branch not in branches:
2213 return []
2214 return []
2214 # the cache returns heads ordered lowest to highest
2215 # the cache returns heads ordered lowest to highest
2215 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2216 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2216 if start is not None:
2217 if start is not None:
2217 # filter out the heads that cannot be reached from startrev
2218 # filter out the heads that cannot be reached from startrev
2218 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2219 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2219 bheads = [h for h in bheads if h in fbheads]
2220 bheads = [h for h in bheads if h in fbheads]
2220 return bheads
2221 return bheads
2221
2222
2222 def branches(self, nodes):
2223 def branches(self, nodes):
2223 if not nodes:
2224 if not nodes:
2224 nodes = [self.changelog.tip()]
2225 nodes = [self.changelog.tip()]
2225 b = []
2226 b = []
2226 for n in nodes:
2227 for n in nodes:
2227 t = n
2228 t = n
2228 while True:
2229 while True:
2229 p = self.changelog.parents(n)
2230 p = self.changelog.parents(n)
2230 if p[1] != nullid or p[0] == nullid:
2231 if p[1] != nullid or p[0] == nullid:
2231 b.append((t, n, p[0], p[1]))
2232 b.append((t, n, p[0], p[1]))
2232 break
2233 break
2233 n = p[0]
2234 n = p[0]
2234 return b
2235 return b
2235
2236
2236 def between(self, pairs):
2237 def between(self, pairs):
2237 r = []
2238 r = []
2238
2239
2239 for top, bottom in pairs:
2240 for top, bottom in pairs:
2240 n, l, i = top, [], 0
2241 n, l, i = top, [], 0
2241 f = 1
2242 f = 1
2242
2243
2243 while n != bottom and n != nullid:
2244 while n != bottom and n != nullid:
2244 p = self.changelog.parents(n)[0]
2245 p = self.changelog.parents(n)[0]
2245 if i == f:
2246 if i == f:
2246 l.append(n)
2247 l.append(n)
2247 f = f * 2
2248 f = f * 2
2248 n = p
2249 n = p
2249 i += 1
2250 i += 1
2250
2251
2251 r.append(l)
2252 r.append(l)
2252
2253
2253 return r
2254 return r
2254
2255
2255 def checkpush(self, pushop):
2256 def checkpush(self, pushop):
2256 """Extensions can override this function if additional checks have
2257 """Extensions can override this function if additional checks have
2257 to be performed before pushing, or call it if they override push
2258 to be performed before pushing, or call it if they override push
2258 command.
2259 command.
2259 """
2260 """
2260
2261
2261 @unfilteredpropertycache
2262 @unfilteredpropertycache
2262 def prepushoutgoinghooks(self):
2263 def prepushoutgoinghooks(self):
2263 """Return util.hooks consists of a pushop with repo, remote, outgoing
2264 """Return util.hooks consists of a pushop with repo, remote, outgoing
2264 methods, which are called before pushing changesets.
2265 methods, which are called before pushing changesets.
2265 """
2266 """
2266 return util.hooks()
2267 return util.hooks()
2267
2268
2268 def pushkey(self, namespace, key, old, new):
2269 def pushkey(self, namespace, key, old, new):
2269 try:
2270 try:
2270 tr = self.currenttransaction()
2271 tr = self.currenttransaction()
2271 hookargs = {}
2272 hookargs = {}
2272 if tr is not None:
2273 if tr is not None:
2273 hookargs.update(tr.hookargs)
2274 hookargs.update(tr.hookargs)
2274 hookargs = pycompat.strkwargs(hookargs)
2275 hookargs = pycompat.strkwargs(hookargs)
2275 hookargs[r'namespace'] = namespace
2276 hookargs[r'namespace'] = namespace
2276 hookargs[r'key'] = key
2277 hookargs[r'key'] = key
2277 hookargs[r'old'] = old
2278 hookargs[r'old'] = old
2278 hookargs[r'new'] = new
2279 hookargs[r'new'] = new
2279 self.hook('prepushkey', throw=True, **hookargs)
2280 self.hook('prepushkey', throw=True, **hookargs)
2280 except error.HookAbort as exc:
2281 except error.HookAbort as exc:
2281 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2282 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2282 if exc.hint:
2283 if exc.hint:
2283 self.ui.write_err(_("(%s)\n") % exc.hint)
2284 self.ui.write_err(_("(%s)\n") % exc.hint)
2284 return False
2285 return False
2285 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2286 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2286 ret = pushkey.push(self, namespace, key, old, new)
2287 ret = pushkey.push(self, namespace, key, old, new)
2287 def runhook():
2288 def runhook():
2288 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2289 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2289 ret=ret)
2290 ret=ret)
2290 self._afterlock(runhook)
2291 self._afterlock(runhook)
2291 return ret
2292 return ret
2292
2293
2293 def listkeys(self, namespace):
2294 def listkeys(self, namespace):
2294 self.hook('prelistkeys', throw=True, namespace=namespace)
2295 self.hook('prelistkeys', throw=True, namespace=namespace)
2295 self.ui.debug('listing keys for "%s"\n' % namespace)
2296 self.ui.debug('listing keys for "%s"\n' % namespace)
2296 values = pushkey.list(self, namespace)
2297 values = pushkey.list(self, namespace)
2297 self.hook('listkeys', namespace=namespace, values=values)
2298 self.hook('listkeys', namespace=namespace, values=values)
2298 return values
2299 return values
2299
2300
2300 def debugwireargs(self, one, two, three=None, four=None, five=None):
2301 def debugwireargs(self, one, two, three=None, four=None, five=None):
2301 '''used to test argument passing over the wire'''
2302 '''used to test argument passing over the wire'''
2302 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2303 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2303 pycompat.bytestr(four),
2304 pycompat.bytestr(four),
2304 pycompat.bytestr(five))
2305 pycompat.bytestr(five))
2305
2306
2306 def savecommitmessage(self, text):
2307 def savecommitmessage(self, text):
2307 fp = self.vfs('last-message.txt', 'wb')
2308 fp = self.vfs('last-message.txt', 'wb')
2308 try:
2309 try:
2309 fp.write(text)
2310 fp.write(text)
2310 finally:
2311 finally:
2311 fp.close()
2312 fp.close()
2312 return self.pathto(fp.name[len(self.root) + 1:])
2313 return self.pathto(fp.name[len(self.root) + 1:])
2313
2314
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback renaming each (vfs, src, dest) entry of *files*.

    The entries are snapshotted as tuples so the returned closure keeps no
    reference back to the transaction that supplied them.
    """
    renames = [tuple(entry) for entry in files]
    def dorenames():
        for vfs, src, dest in renames:
            # vfs.rename is a no-op when src and dest are the same file,
            # which would leave both on disk; unlink dest first so the
            # rename can never be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError:
                # journal file does not yet exist
                pass
    return dorenames
2328
2329
def undoname(fn):
    """Map a journal file path to the matching undo file path."""
    directory, name = os.path.split(fn)
    assert name.startswith('journal')
    # only the leading 'journal' is rewritten; suffixes are preserved
    return os.path.join(directory, name.replace('journal', 'undo', 1))
2333
2334
def instance(ui, path, create):
    """Open (or create) the local repository at *path* (path or local URL)."""
    localpath = util.urllocalpath(path)
    return localrepository(ui, localpath, create)
2336
2337
def islocal(path):
    """A localrepo path is local by definition."""
    return True
2339
2340
def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = {'revlogv1'}
    # store-layout options, each gated by its own format knob
    for configkey, requirement in [('usestore', 'store'),
                                   ('usefncache', 'fncache'),
                                   ('dotencode', 'dotencode')]:
        if ui.configbool('format', configkey):
            requirements.add(requirement)

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)

    return requirements
@@ -1,2237 +1,2237 b''
1 # revset.py - revision set queries for mercurial
1 # revset.py - revision set queries for mercurial
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import re
10 import re
11
11
12 from .i18n import _
12 from .i18n import _
13 from . import (
13 from . import (
14 dagop,
14 dagop,
15 destutil,
15 destutil,
16 encoding,
16 encoding,
17 error,
17 error,
18 hbisect,
18 hbisect,
19 match as matchmod,
19 match as matchmod,
20 node,
20 node,
21 obsolete as obsmod,
21 obsolete as obsmod,
22 obsutil,
22 obsutil,
23 pathutil,
23 pathutil,
24 phases,
24 phases,
25 pycompat,
25 pycompat,
26 registrar,
26 registrar,
27 repoview,
27 repoview,
28 revsetlang,
28 revsetlang,
29 scmutil,
29 scmutil,
30 smartset,
30 smartset,
31 stack as stackmod,
31 stack as stackmod,
32 util,
32 util,
33 )
33 )
34 from .utils import (
34 from .utils import (
35 dateutil,
35 dateutil,
36 stringutil,
36 stringutil,
37 )
37 )
38
38
39 # helpers for processing parsed tree
39 # helpers for processing parsed tree
40 getsymbol = revsetlang.getsymbol
40 getsymbol = revsetlang.getsymbol
41 getstring = revsetlang.getstring
41 getstring = revsetlang.getstring
42 getinteger = revsetlang.getinteger
42 getinteger = revsetlang.getinteger
43 getboolean = revsetlang.getboolean
43 getboolean = revsetlang.getboolean
44 getlist = revsetlang.getlist
44 getlist = revsetlang.getlist
45 getrange = revsetlang.getrange
45 getrange = revsetlang.getrange
46 getargs = revsetlang.getargs
46 getargs = revsetlang.getargs
47 getargsdict = revsetlang.getargsdict
47 getargsdict = revsetlang.getargsdict
48
48
49 baseset = smartset.baseset
49 baseset = smartset.baseset
50 generatorset = smartset.generatorset
50 generatorset = smartset.generatorset
51 spanset = smartset.spanset
51 spanset = smartset.spanset
52 fullreposet = smartset.fullreposet
52 fullreposet = smartset.fullreposet
53
53
54 # Constants for ordering requirement, used in getset():
54 # Constants for ordering requirement, used in getset():
55 #
55 #
56 # If 'define', any nested functions and operations MAY change the ordering of
56 # If 'define', any nested functions and operations MAY change the ordering of
57 # the entries in the set (but if changes the ordering, it MUST ALWAYS change
57 # the entries in the set (but if changes the ordering, it MUST ALWAYS change
58 # it). If 'follow', any nested functions and operations MUST take the ordering
58 # it). If 'follow', any nested functions and operations MUST take the ordering
59 # specified by the first operand to the '&' operator.
59 # specified by the first operand to the '&' operator.
60 #
60 #
61 # For instance,
61 # For instance,
62 #
62 #
63 # X & (Y | Z)
63 # X & (Y | Z)
64 # ^ ^^^^^^^
64 # ^ ^^^^^^^
65 # | follow
65 # | follow
66 # define
66 # define
67 #
67 #
68 # will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
68 # will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
69 # of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
69 # of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
70 #
70 #
71 # 'any' means the order doesn't matter. For instance,
71 # 'any' means the order doesn't matter. For instance,
72 #
72 #
73 # (X & !Y) | ancestors(Z)
73 # (X & !Y) | ancestors(Z)
74 # ^ ^
74 # ^ ^
75 # any any
75 # any any
76 #
76 #
77 # For 'X & !Y', 'X' decides the order and 'Y' is subtracted from 'X', so the
77 # For 'X & !Y', 'X' decides the order and 'Y' is subtracted from 'X', so the
78 # order of 'Y' does not matter. For 'ancestors(Z)', Z's order does not matter
78 # order of 'Y' does not matter. For 'ancestors(Z)', Z's order does not matter
79 # since 'ancestors' does not care about the order of its argument.
79 # since 'ancestors' does not care about the order of its argument.
80 #
80 #
81 # Currently, most revsets do not care about the order, so 'define' is
81 # Currently, most revsets do not care about the order, so 'define' is
82 # equivalent to 'follow' for them, and the resulting order is based on the
82 # equivalent to 'follow' for them, and the resulting order is based on the
83 # 'subset' parameter passed down to them:
83 # 'subset' parameter passed down to them:
84 #
84 #
85 # m = revset.match(...)
85 # m = revset.match(...)
86 # m(repo, subset, order=defineorder)
86 # m(repo, subset, order=defineorder)
87 # ^^^^^^
87 # ^^^^^^
88 # For most revsets, 'define' means using the order this subset provides
88 # For most revsets, 'define' means using the order this subset provides
89 #
89 #
90 # There are a few revsets that always redefine the order if 'define' is
90 # There are a few revsets that always redefine the order if 'define' is
91 # specified: 'sort(X)', 'reverse(X)', 'x:y'.
91 # specified: 'sort(X)', 'reverse(X)', 'x:y'.
92 anyorder = 'any' # don't care the order, could be even random-shuffled
92 anyorder = 'any' # don't care the order, could be even random-shuffled
93 defineorder = 'define' # ALWAYS redefine, or ALWAYS follow the current order
93 defineorder = 'define' # ALWAYS redefine, or ALWAYS follow the current order
94 followorder = 'follow' # MUST follow the current order
94 followorder = 'follow' # MUST follow the current order
95
95
96 # helpers
96 # helpers
97
97
def getset(repo, subset, x, order=defineorder):
    """Evaluate parse tree *x* to a smartset restricted to *subset*."""
    if not x:
        raise error.ParseError(_("missing argument"))
    # x is (opname, operand1, operand2, ...); dispatch on the op name
    op = x[0]
    return methods[op](repo, subset, *x[1:], order=order)
102
102
def _getrevsource(repo, r):
    """Return the rev that r's extras record as its source, or None."""
    extra = repo[r].extra()
    for label in ('source', 'transplant_source', 'rebase_source'):
        if label not in extra:
            continue
        try:
            return repo[extra[label]].rev()
        except error.RepoLookupError:
            # recorded source no longer exists; try the next label
            pass
    return None
112
112
def _sortedb(xs):
    # sort after coercing nested str to bytes so ordering is consistent
    # across Python versions
    return sorted(util.rapply(pycompat.maybebytestr, xs))
115
115
116 # operator methods
116 # operator methods
117
117
def stringset(repo, subset, x, order):
    """Resolve a revision symbol and intersect it with *subset*."""
    if not x:
        raise error.ParseError(_("empty string is not a valid revision"))
    rev = scmutil.intrev(scmutil.revsymbol(repo, x))
    # null is an implicit member of the full repo set even though it is
    # never enumerated by it
    nullinfull = rev == node.nullrev and isinstance(subset, fullreposet)
    if rev in subset or nullinfull:
        return baseset([rev])
    return baseset()
126
126
def rangeset(repo, subset, x, y, order):
    """'x:y' — the revisions between the first of x and the last of y."""
    xs = getset(repo, fullreposet(repo), x)
    ys = getset(repo, fullreposet(repo), y)
    if not xs or not ys:
        return baseset()
    return _makerangeset(repo, subset, xs.first(), ys.last(), order)
134
134
def rangeall(repo, subset, x, order):
    """':' — every revision from 0 through tip."""
    assert x is None
    return _makerangeset(repo, subset, 0, repo.changelog.tiprev(), order)
138
138
def rangepre(repo, subset, y, order):
    """':y' — revisions from 0 through the last of y."""
    # ':y' can't be rewritten to '0:y' since '0' may be hidden
    ys = getset(repo, fullreposet(repo), y)
    if not ys:
        return baseset()
    return _makerangeset(repo, subset, 0, ys.last(), order)
145
145
def rangepost(repo, subset, x, order):
    """'x:' — revisions from the first of x through tip."""
    xs = getset(repo, fullreposet(repo), x)
    if not xs:
        return baseset()
    return _makerangeset(repo, subset, xs.first(), repo.changelog.tiprev(),
                         order)
152
152
def _makerangeset(repo, subset, m, n, order):
    """Build the m..n range set (inclusive, possibly descending) and
    intersect it with *subset* honoring *order*."""
    if m == n:
        rangerevs = baseset([m])
    elif n == node.wdirrev:
        # ascending range ending at the working directory pseudo-rev
        rangerevs = spanset(repo, m, len(repo)) + baseset([n])
    elif m == node.wdirrev:
        # descending range starting at the working directory pseudo-rev
        rangerevs = baseset([m]) + spanset(repo, repo.changelog.tiprev(),
                                           n - 1)
    elif m < n:
        rangerevs = spanset(repo, m, n + 1)
    else:
        rangerevs = spanset(repo, m, n - 1)

    if order == defineorder:
        # the range defines the order
        return rangerevs & subset
    # carrying the sorting over when possible would be more efficient
    return subset & rangerevs
170
170
def dagrange(repo, subset, x, y, order):
    """Handle the 'x::y' DAG-range operator."""
    everything = fullreposet(repo)
    roots = getset(repo, everything, x)
    heads = getset(repo, everything, y)
    reachable = dagop.reachableroots(repo, roots, heads, includepath=True)
    return subset & reachable
176
176
def andset(repo, subset, x, y, order):
    """Handle 'x and y'.

    x defines the result order; y only narrows it down, so y is evaluated
    with followorder (or anyorder when no order is requested at all).
    """
    yorder = anyorder if order == anyorder else followorder
    narrowed = getset(repo, subset, x, order)
    return getset(repo, narrowed, y, yorder)
183
183
def andsmallyset(repo, subset, x, y, order):
    """Handle 'andsmally(x, y)'.

    Equivalent to 'and(x, y)', but faster when y is small: the cheap y side
    is evaluated first so x only has to scan the narrowed subset.
    """
    yorder = anyorder if order == anyorder else followorder
    narrowed = getset(repo, subset, y, yorder)
    return getset(repo, narrowed, x, order)
191
191
def differenceset(repo, subset, x, y, order):
    """Handle 'x - y': revisions in x that are not in y."""
    included = getset(repo, subset, x, order)
    excluded = getset(repo, subset, y, anyorder)
    return included - excluded
194
194
def _orsetlist(repo, subset, xs, order):
    """Union the parse trees in xs by balanced divide-and-conquer.

    Splitting in halves keeps the recursion depth logarithmic in len(xs).
    """
    assert xs
    if len(xs) == 1:
        return getset(repo, subset, xs[0], order)
    mid = len(xs) // 2
    left = _orsetlist(repo, subset, xs[:mid], order)
    right = _orsetlist(repo, subset, xs[mid:], order)
    return left + right
203
203
def orset(repo, subset, x, order):
    """Handle the 'or' operator over a list of expressions."""
    exprs = getlist(x)
    if order != followorder:
        return _orsetlist(repo, subset, exprs, order)
    # slow path to take the subset order
    return subset & _orsetlist(repo, fullreposet(repo), exprs, anyorder)
211
211
def notset(repo, subset, x, order):
    """Handle 'not x': everything in subset that is not in x."""
    excluded = getset(repo, subset, x, anyorder)
    return subset - excluded
214
214
def relationset(repo, subset, x, y, order):
    """Reject a bare 'x#y' relation; only subscripted forms are handled."""
    msg = _("can't use a relation in this context")
    raise error.ParseError(msg)
217
217
def relsubscriptset(repo, subset, x, y, z, order):
    """Handle 'x#y[z]': apply relation y with integer subscript z to set x.

    Only the 'generations' relation (alias 'g') is implemented so far.
    """
    # this is pretty basic implementation of 'x#y[z]' operator, still
    # experimental so undocumented. see the wiki for further ideas.
    # https://www.mercurial-scm.org/wiki/RevsetOperatorPlan
    rel = getsymbol(y)
    n = getinteger(z, _("relation subscript must be an integer"))

    # TODO: perhaps this should be a table of relation functions
    if rel in ('g', 'generations'):
        # TODO: support range, rewrite tests, and drop startdepth argument
        # from ancestors() and descendants() predicates
        if n <= 0:
            # non-positive subscripts walk toward ancestors; depth is the
            # magnitude (stopdepth is exclusive, hence n + 1)
            n = -n
            return _ancestors(repo, subset, x, startdepth=n, stopdepth=n + 1)
        else:
            return _descendants(repo, subset, x, startdepth=n, stopdepth=n + 1)

    raise error.UnknownIdentifier(rel, ['generations'])
236
236
def subscriptset(repo, subset, x, y, order):
    """Reject 'x[y]' outside of a relation expression."""
    msg = _("can't use a subscript in this context")
    raise error.ParseError(msg)
239
239
def listset(repo, subset, *xs, **opts):
    """Reject a bare list; lists are only valid as function arguments."""
    hint = _('see hg help "revsets.x or y"')
    raise error.ParseError(_("can't use a list in this context"), hint=hint)
243
243
def keyvaluepair(repo, subset, k, v, order):
    """Reject a 'k=v' pair outside of a function argument list."""
    msg = _("can't use a key-value pair in this context")
    raise error.ParseError(msg)
246
246
def func(repo, subset, a, b, order):
    """Dispatch a function-call node to the predicate registered for it.

    a is the symbol naming the function, b its argument tree.  Predicates
    flagged with the '_takeorder' attribute additionally receive the
    requested evaluation order.  An unknown name raises UnknownIdentifier,
    suggesting only documented (public) predicates.
    """
    f = getsymbol(a)
    if f in symbols:
        func = symbols[f]
        if getattr(func, '_takeorder', False):
            return func(repo, subset, b, order)
        return func(repo, subset, b)

    # a predicate without a docstring is internal; don't advertise it
    keep = lambda fn: getattr(fn, '__doc__', None) is not None

    syms = [s for (s, fn) in symbols.items() if keep(fn)]
    raise error.UnknownIdentifier(f, syms)
259
259
# functions

# symbols are callables like:
# fn(repo, subset, x)
# with:
# repo - current repository instance
# subset - of revisions to be examined
# x - argument in tree form
symbols = revsetlang.symbols

# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
safesymbols = set()

# registrar through which the @predicate-decorated functions below add
# themselves to the 'symbols' table
predicate = registrar.revsetpredicate()
276
276
@predicate('_destupdate')
def _destupdate(repo, subset, x):
    # experimental revset for update destination
    # NOTE(review): the funcname passed to getargsdict is 'limit', so parse
    # errors would name 'limit' rather than '_destupdate' — confirm intent
    args = getargsdict(x, 'limit', 'clean')
    return subset & baseset([destutil.destupdate(repo,
                                                 **pycompat.strkwargs(args))[0]])
283
283
@predicate('_destmerge')
def _destmerge(repo, subset, x):
    # experimental revset for merge destination
    if x is None:
        sourceset = None
    else:
        sourceset = getset(repo, fullreposet(repo), x)
    dest = destutil.destmerge(repo, sourceset=sourceset)
    return subset & baseset([dest])
291
291
@predicate('adds(pattern)', safe=True, weight=30)
def adds(repo, subset, x):
    """Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
    # field 1 of repo.status()'s result is the list of added files
    return checkstatus(repo, subset, pat, 1)
303
303
@predicate('ancestor(*changeset)', safe=True, weight=0.5)
def ancestor(repo, subset, x):
    """A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    args = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # fold every revision of every argument into one running ancestor
    for arg in args:
        for r in getset(repo, rl, arg):
            if anc is None:
                anc = repo[r]
            else:
                anc = anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()
328
328
def _ancestors(repo, subset, x, followfirst=False, startdepth=None,
               stopdepth=None):
    """Shared helper computing ancestors of x within the given depth window."""
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    ancs = dagop.revancestors(repo, heads, followfirst, startdepth, stopdepth)
    return subset & ancs
336
336
@predicate('ancestors(set[, depth])', safe=True)
def ancestors(repo, subset, x):
    """Changesets that are ancestors of changesets in set, including the
    given changesets themselves.

    If depth is specified, the result only includes changesets up to
    the specified generation.
    """
    # startdepth is for internal use only until we can decide the UI
    args = getargsdict(x, 'ancestors', 'set depth startdepth')
    if 'set' not in args:
        # i18n: "ancestors" is a keyword
        raise error.ParseError(_('ancestors takes at least 1 argument'))
    startdepth = stopdepth = None
    if 'startdepth' in args:
        n = getinteger(args['startdepth'],
                       "ancestors expects an integer startdepth")
        if n < 0:
            raise error.ParseError("negative startdepth")
        startdepth = n
    if 'depth' in args:
        # i18n: "ancestors" is a keyword
        n = getinteger(args['depth'], _("ancestors expects an integer depth"))
        if n < 0:
            raise error.ParseError(_("negative depth"))
        # stopdepth is exclusive, so depth=n means generations 0..n inclusive
        stopdepth = n + 1
    return _ancestors(repo, subset, args['set'],
                      startdepth=startdepth, stopdepth=stopdepth)
365
365
@predicate('_firstancestors', safe=True)
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    # Internal predicate (no docstring on purpose, so it is not advertised).
    return _ancestors(repo, subset, x, followfirst=True)
371
371
def _childrenspec(repo, subset, x, n, order):
    """Changesets that are the Nth child of a changeset
    in set.

    Raises RepoLookupError when the descent is ambiguous (a revision on the
    path has more than one child); revisions with too few descendants are
    silently dropped.
    """
    cs = set()
    for r in getset(repo, fullreposet(repo), x):
        for i in range(n):
            c = repo[r].children()
            if len(c) == 0:
                # no child that far down: drop this starting revision
                break
            if len(c) > 1:
                raise error.RepoLookupError(
                    _("revision in set has more than one child"))
            r = c[0].rev()
        else:
            # for/else: only reached when the loop completed all n steps
            # without break, i.e. r is now the Nth child
            cs.add(r)
    return subset & cs
389
389
def ancestorspec(repo, subset, x, n, order):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    n = getinteger(n, _("~ expects a number"))
    if n < 0:
        # children lookup
        return _childrenspec(repo, subset, x, -n, order)
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        for i in range(n):
            try:
                r = cl.parentrevs(r)[0]
            except error.WdirUnsupported:
                # the working directory is not in the changelog; ask its
                # context object for the first parent instead
                r = repo[r].parents()[0].rev()
        ps.add(r)
    return subset & ps
409
409
@predicate('author(string)', safe=True, weight=10)
def author(repo, subset, x):
    """Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    pat = getstring(x, _("author requires a string"))
    kind, pattern, matcher = _substringmatcher(pat, casesensitive=False)
    def matchuser(rev):
        return matcher(repo[rev].user())
    return subset.filter(matchuser, condrepr=('<user %r>', pat))
419
419
@predicate('bisect(string)', safe=True)
def bisect(repo, subset, x):
    """Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads`` : csets topologically good/bad
    - ``range`` : csets taking part in the bisection
    - ``pruned`` : csets that are goods, bads or skipped
    - ``untested`` : csets whose fate is yet unknown
    - ``ignored`` : csets ignored due to DAG topology
    - ``current`` : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    marked = set(hbisect.get(repo, status))
    return subset & marked
436
436
# Backward-compatibility
# - no help entry so that we do not advertise it any more
@predicate('bisected', safe=True)
def bisected(repo, subset, x):
    # deprecated alias: delegate straight to bisect()
    return bisect(repo, subset, x)
442
442
@predicate('bookmark([name])', safe=True)
def bookmark(repo, subset, x):
    """The named bookmark or all bookmarks.

    Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = stringutil.stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            # exact name: direct lookup; missing bookmark is an error
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % pattern)
            bms.add(repo[bmrev].rev())
        else:
            # pattern kind (e.g. 're:'): collect every matching bookmark
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        # no argument: every bookmarked revision
        bms = {repo[r].rev() for r in repo._bookmarks.values()}
    bms -= {node.nullrev}
    return subset & bms
477
477
@predicate('branch(string or set)', safe=True, weight=10)
def branch(repo, subset, x):
    """
    All changesets belonging to the given branch or the branches of the given
    changesets.

    Pattern matching is supported for `string`. See
    :hg:`help revisions.patterns`.
    """
    getbi = repo.revbranchcache().branchinfo
    def getbranch(r):
        try:
            return getbi(r)[0]
        except error.WdirUnsupported:
            # wdir is not in the branch cache; ask the context directly
            return repo[r].branch()

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = stringutil.stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists and pattern kind is not specified explicitly
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbranch(r)),
                                     condrepr=('<branch %r>', b))
            if b.startswith('literal:'):
                raise error.RepoLookupError(_("branch '%s' does not exist")
                                            % pattern)
        else:
            return subset.filter(lambda r: matcher(getbranch(r)),
                                 condrepr=('<branch %r>', b))

    # the argument is a revset: select revisions on the same branches
    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbranch(r))
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbranch(r) in b,
                         condrepr=lambda: '<branch %r>' % _sortedb(b))
521
521
@predicate('phasedivergent()', safe=True)
def phasedivergent(repo, subset, x):
    """Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `phasedivergent`.
    (EXPERIMENTAL)
    """
    # i18n: "phasedivergent" is a keyword
    getargs(x, 0, 0, _("phasedivergent takes no arguments"))
    divergent = obsmod.getrevs(repo, 'phasedivergent')
    return subset & divergent
533
533
@predicate('bundle()', safe=True)
def bundle(repo, subset, x):
    """Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        # only a bundlerepo's changelog carries this attribute
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        raise error.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs
545
545
def checkstatus(repo, subset, pat, field):
    """Helper for status-based predicates (e.g. adds() passes field=1).

    Keeps revisions whose repo.status(p1, rev) result at index *field*
    contains a file matching pat.  field indexes the status tuple; only
    field 1 (added files, used by adds()) is visible from this chunk —
    other indices presumably map to the remaining status lists; verify
    against callers.
    """
    hasset = matchmod.patkind(pat) == 'set'

    # one-element list used as a mutable cell to cache the matcher
    mcache = [None]
    def matches(x):
        c = repo[x]
        # fileset patterns depend on the revision's context, so rebuild the
        # matcher every time; a plain pattern is built once and reused
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            # exact literal filename: cheap membership tests suffice
            fname = m.files()[0]
        if fname is not None:
            # fast pre-filter: the file must be touched by the changeset
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True
        # falls through returning None (falsy) when nothing matched

    return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
577
577
def _children(repo, subset, parentset):
    """Return a baseset of subset members having a parent in parentset."""
    if not parentset:
        return baseset()
    childrevs = set()
    parentrevsof = repo.changelog.parentrevs
    minparent = parentset.min()
    nullrev = node.nullrev
    for rev in subset:
        if rev <= minparent:
            # a child always has a higher revision number than its parents
            continue
        p1, p2 = parentrevsof(rev)
        if p1 in parentset:
            childrevs.add(rev)
        if p2 != nullrev and p2 in parentset:
            childrevs.add(rev)
    return baseset(childrevs)
594
594
@predicate('children(set)', safe=True)
def children(repo, subset, x):
    """Child changesets of changesets in set.
    """
    parents = getset(repo, fullreposet(repo), x)
    return subset & _children(repo, subset, parents)
602
602
@predicate('closed()', safe=True, weight=10)
def closed(repo, subset, x):
    """Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    def isclosed(rev):
        return repo[rev].closesbranch()
    return subset.filter(isclosed, condrepr='<branch closed>')
611
611
@predicate('contains(pattern)', weight=100)
def contains(repo, subset, x):
    """The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            # plain path: canonicalize once and use manifest membership
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            # real pattern: walk the revision's entire manifest
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches, condrepr=('<contains %r>', pat))
638
638
@predicate('converted([id])', safe=True)
def converted(repo, subset, x):
    """Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    # i18n: "converted" is a keyword
    args = getargs(x, 0, 1, _('converted takes one or no arguments'))
    rev = None
    if args:
        # i18n: "converted" is a keyword
        rev = getstring(args[0], _('converted requires a revision'))

    def _matchvalue(r):
        source = repo[r].extra().get('convert_revision', None)
        if source is None:
            return False
        return rev is None or source.startswith(rev)

    return subset.filter(lambda r: _matchvalue(r),
                         condrepr=('<converted %r>', rev))
661
661
@predicate('date(interval)', safe=True, weight=10)
def date(repo, subset, x):
    """Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    spec = getstring(x, _("date requires a string"))
    inrange = dateutil.matchdate(spec)

    def matches(rev):
        # ctx.date() returns (timestamp, tzoffset); only the timestamp
        # participates in interval matching
        return inrange(repo[rev].date()[0])

    return subset.filter(matches, condrepr=('<date %r>', spec))
671
671
@predicate('desc(string)', safe=True, weight=10)
def desc(repo, subset, x):
    """Search commit message for string. The match is case-insensitive.

    Pattern matching is supported for `string`. See
    :hg:`help revisions.patterns`.
    """
    # i18n: "desc" is a keyword
    spec = getstring(x, _("desc requires a string"))

    # only the matcher is needed here; the kind and normalized pattern
    # returned by _substringmatcher() are unused
    matcher = _substringmatcher(spec, casesensitive=False)[2]

    def matches(rev):
        return matcher(repo[rev].description())

    return subset.filter(matches, condrepr=('<desc %r>', spec))
686
686
def _descendants(repo, subset, x, followfirst=False, startdepth=None,
                 stopdepth=None):
    # Shared implementation of descendants() and _firstdescendants():
    # resolve the root set against the whole repo, then intersect the
    # computed descendant set with the incoming subset.
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        # no roots means no descendants at all
        return baseset()
    descendantrevs = dagop.revdescendants(repo, roots, followfirst,
                                          startdepth, stopdepth)
    return subset & descendantrevs
694
694
@predicate('descendants(set[, depth])', safe=True)
def descendants(repo, subset, x):
    """Changesets which are descendants of changesets in set, including the
    given changesets themselves.

    If depth is specified, the result only includes changesets up to
    the specified generation.
    """
    # startdepth is for internal use only until we can decide the UI
    args = getargsdict(x, 'descendants', 'set depth startdepth')
    if 'set' not in args:
        # i18n: "descendants" is a keyword
        raise error.ParseError(_('descendants takes at least 1 argument'))
    startdepth = stopdepth = None
    if 'startdepth' in args:
        # these messages are deliberately not wrapped in _(): startdepth is
        # an internal-only argument (see the note above)
        n = getinteger(args['startdepth'],
                       "descendants expects an integer startdepth")
        if n < 0:
            raise error.ParseError("negative startdepth")
        startdepth = n
    if 'depth' in args:
        # i18n: "descendants" is a keyword
        n = getinteger(args['depth'], _("descendants expects an integer depth"))
        if n < 0:
            raise error.ParseError(_("negative depth"))
        # depth=N means "up to the N-th generation inclusive", which is an
        # exclusive stop depth of N+1 for _descendants()
        stopdepth = n + 1
    return _descendants(repo, subset, args['set'],
                        startdepth=startdepth, stopdepth=stopdepth)
723
723
@predicate('_firstdescendants', safe=True)
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Same as ``descendants(set)``, but restricted to first parents only.
    return _descendants(repo, subset, x, followfirst=True)
729
729
@predicate('destination([set])', safe=True, weight=10)
def destination(repo, subset, x):
    """Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source. Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be.  Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        # lineage collects the chain of candidate dests walked so far; it is
        # created lazily so revisions with no source allocate nothing
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set.  Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset.  Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            # walk one step further back along the source chain
            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__,
                         condrepr=lambda: '<destination %r>' % _sortedb(dests))
774
774
@predicate('contentdivergent()', safe=True)
def contentdivergent(repo, subset, x):
    """
    Final successors of changesets with an alternative set of final
    successors. (EXPERIMENTAL)
    """
    # i18n: "contentdivergent" is a keyword
    getargs(x, 0, 0, _("contentdivergent takes no arguments"))
    # the obsstore maintains a precomputed 'contentdivergent' revision set
    divergent = obsmod.getrevs(repo, 'contentdivergent')
    return subset & divergent
785
785
@predicate('extdata(source)', safe=False, weight=100)
def extdata(repo, subset, x):
    """Changesets in the specified extdata source. (EXPERIMENTAL)"""
    # i18n: "extdata" is a keyword
    args = getargsdict(x, 'extdata', 'source')
    # args.get() hands None to getstring() when 'source' is missing, which
    # triggers the parse error below
    src = getstring(args.get('source'),
                    # i18n: "extdata" is a keyword
                    _('extdata takes at least 1 string argument'))
    revs = baseset(scmutil.extdatasource(repo, src))
    return subset & revs
796
796
@predicate('extinct()', safe=True)
def extinct(repo, subset, x):
    """Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    # the obsstore maintains a precomputed 'extinct' revision set
    return subset & obsmod.getrevs(repo, 'extinct')
805
805
@predicate('extra(label, [value])', safe=True)
def extra(repo, subset, x):
    """Changesets with the given label in the extra metadata, with the given
    optional value.

    Pattern matching is supported for `value`. See
    :hg:`help revisions.patterns`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None
    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        # 'value' is rebound to the normalized pattern so condrepr shows it;
        # only 'matcher' is consulted for the actual comparison
        kind, value, matcher = stringutil.stringmatcher(value)

    def matches(r):
        extradict = repo[r].extra()
        if label not in extradict:
            return False
        return value is None or matcher(extradict[label])

    return subset.filter(matches,
                         condrepr=('<extra[%r] %r>', label, value))
835
835
@predicate('filelog(pattern)', safe=True)
def filelog(repo, subset, x):
    """Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        # plain path with no pattern kind: resolve directly, no manifest walk
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        fl = repo.file(f)
        # known: maps file node -> changelog rev resolved so far, so each
        # file node is only resolved once
        known = {}
        # scanpos: lowest changelog position not yet scanned for filtered
        # linkrevs; set to None while a forward scan is in progress
        scanpos = 0
        for fr in list(fl):
            fn = fl.node(fr)
            if fn in known:
                s.add(known[fn])
                continue

            lr = fl.linkrev(fr)
            if lr in cl:
                s.add(lr)
            elif scanpos is not None:
                # lowest matching changeset is filtered, scan further
                # ahead in changelog
                start = max(lr, scanpos) + 1
                scanpos = None
                for r in cl.revs(start):
                    # minimize parsing of non-matching entries
                    if f in cl.revision(r) and f in cl.readfiles(r):
                        try:
                            # try to use manifest delta fastpath
                            n = repo[r].filenode(f)
                            if n not in known:
                                if n == fn:
                                    s.add(r)
                                    scanpos = r
                                    break
                                else:
                                    known[n] = r
                        except error.ManifestLookupError:
                            # deletion in changelog
                            continue

    return subset & s
901
901
@predicate('first(set, [n])', safe=True, takeorder=True, weight=0)
def first(repo, subset, x, order):
    """An alias for limit().
    """
    # both predicates share a single implementation in limit()
    return limit(repo, subset, x, order)
907
907
def _follow(repo, subset, x, name, followfirst=False):
    # Shared implementation of follow() and _followfirst(); 'name' is the
    # predicate name used in error messages.
    args = getargsdict(x, name, 'file startrev')
    revs = None
    if 'startrev' in args:
        revs = getset(repo, fullreposet(repo), args['startrev'])
    if 'file' in args:
        # rebind x to the file pattern string
        x = getstring(args['file'], _("%s expected a pattern") % name)
        if revs is None:
            # None stands for the working directory
            revs = [None]
        fctxs = []
        for r in revs:
            ctx = mctx = repo[r]
            if r is None:
                # match against the working directory (mctx), but take the
                # file contexts from '.' (ctx)
                ctx = repo['.']
            m = matchmod.match(repo.root, repo.getcwd(), [x],
                               ctx=mctx, default='path')
            fctxs.extend(ctx[f].introfilectx() for f in ctx.manifest().walk(m))
        s = dagop.filerevancestors(fctxs, followfirst)
    else:
        # no file pattern: follow changeset ancestry from startrev (or '.')
        if revs is None:
            revs = baseset([repo['.'].rev()])
        s = dagop.revancestors(repo, revs, followfirst)

    return subset & s
932
932
@predicate('follow([file[, startrev]])', safe=True)
def follow(repo, subset, x):
    """
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If file pattern is specified, the histories of files matching given
    pattern in the revision given by startrev are followed, including copies.
    """
    # all of the work is shared with _followfirst() via _follow()
    return _follow(repo, subset, x, 'follow')
941
941
@predicate('_followfirst', safe=True)
def _followfirst(repo, subset, x):
    # ``_followfirst([file[, startrev]])``
    # Same as ``follow([file[, startrev]])``, but restricted to the first
    # parent of every revision or file revision.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
948
948
@predicate('followlines(file, fromline:toline[, startrev=., descend=False])',
           safe=True)
def followlines(repo, subset, x):
    """Changesets modifying `file` in line range ('fromline', 'toline').

    Line range corresponds to 'file' content at 'startrev' and should hence be
    consistent with file size. If startrev is not specified, working directory's
    parent is used.

    By default, ancestors of 'startrev' are returned. If 'descend' is True,
    descendants of 'startrev' are returned though renames are (currently) not
    followed in this direction.
    """
    args = getargsdict(x, 'followlines', 'file *lines startrev descend')
    if len(args['lines']) != 1:
        raise error.ParseError(_("followlines requires a line range"))

    rev = '.'
    if 'startrev' in args:
        revs = getset(repo, fullreposet(repo), args['startrev'])
        if len(revs) != 1:
            raise error.ParseError(
                # i18n: "followlines" is a keyword
                _("followlines expects exactly one revision"))
        rev = revs.last()

    pat = getstring(args['file'], _("followlines requires a pattern"))
    # i18n: "followlines" is a keyword
    msg = _("followlines expects exactly one file")
    fname = scmutil.parsefollowlinespattern(repo, rev, pat, msg)
    # i18n: "followlines" is a keyword
    lr = getrange(args['lines'][0], _("followlines expects a line range"))
    fromline, toline = [getinteger(a, _("line range bounds must be integers"))
                        for a in lr]
    # util.processlinerange presumably validates/normalizes the bounds;
    # TODO confirm against its definition
    fromline, toline = util.processlinerange(fromline, toline)

    fctx = repo[rev].filectx(fname)
    descend = False
    if 'descend' in args:
        descend = getboolean(args['descend'],
                             # i18n: "descend" is a keyword
                             _("descend argument must be a boolean"))
    if descend:
        # descendants are generated in ascending revision order
        rs = generatorset(
            (c.rev() for c, _linerange
             in dagop.blockdescendants(fctx, fromline, toline)),
            iterasc=True)
    else:
        # ancestors are generated in descending revision order
        rs = generatorset(
            (c.rev() for c, _linerange
             in dagop.blockancestors(fctx, fromline, toline)),
            iterasc=False)
    return subset & rs
1002
1002
@predicate('all()', safe=True)
def getall(repo, subset, x):
    """All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    # intersecting with spanset drops the "null" revision if subset has it
    return subset & spanset(repo)
1010
1010
@predicate('grep(regex)', weight=10)
def grep(repo, subset, x):
    """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    # i18n: "grep" is a keyword
    pattern = getstring(x, _("grep requires a string"))
    try:
        regex = re.compile(pattern)
    except re.error as e:
        raise error.ParseError(
            _('invalid match pattern: %s') % stringutil.forcebytestr(e))

    def matches(rev):
        # scan the touched files plus the user and description fields
        ctx = repo[rev]
        fields = ctx.files() + [ctx.user(), ctx.description()]
        return any(regex.search(field) for field in fields)

    return subset.filter(matches, condrepr=('<grep %r>', regex.pattern))
1032
1032
@predicate('_matchfiles', safe=True)
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    # messages here are deliberately not translated: this is an internal
    # predicate not exposed to users
    l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        s = getstring(arg, "_matchfiles requires string arguments")
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'revision')
            if value == '': # empty means working directory
                rev = node.wdirrev
            else:
                rev = value
        elif prefix == 'd:':
            if default is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'default mode')
            default = value
        else:
            raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
    if not default:
        default = 'glob'
    # 'set:' (fileset) patterns need a changectx to evaluate, which forces
    # the matcher rebuild below when no fixed revision was given
    hasset = any(matchmod.patkind(p) == 'set' for p in pats + inc + exc)

    # single-element list used as a mutable cell so matches() can cache the
    # matcher across calls
    mcache = [None]

    # This directly read the changelog data as creating changectx for all
    # revisions is quite expensive.
    getfiles = repo.changelog.readfiles
    wdirrev = node.wdirrev
    def matches(x):
        if x == wdirrev:
            # readfiles() cannot resolve the working directory; use a ctx
            files = repo[x].files()
        else:
            files = getfiles(x)

        # rebuild the matcher per revision only when a fileset pattern is
        # present and no fixed 'r:' revision was given; otherwise reuse it
        if not mcache[0] or (hasset and rev is None):
            r = x if rev is None else rev
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), pats,
                                       include=inc, exclude=exc, ctx=repo[r],
                                       default=default)
        m = mcache[0]

        for f in files:
            if m(f):
                return True
        return False

    return subset.filter(matches,
                         condrepr=('<matchfiles patterns=%r, include=%r '
                                   'exclude=%r, default=%r, rev=%r>',
                                   pats, inc, exc, default, rev))
1106
1106
@predicate('file(pattern)', safe=True, weight=10)
def hasfile(repo, subset, x):
    """Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pattern = getstring(x, _("file requires a pattern"))
    # delegate to _matchfiles() with a single plain 'p:' pattern argument
    return _matchfiles(repo, subset, ('string', 'p:' + pattern))
1119
1119
@predicate('head()', safe=True)
def head(repo, subset, x):
    """Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    cl = repo.changelog
    headrevs = set()
    # Collect the revision number of every head recorded in the branch
    # map cache, across all named branches.
    for branchheads in repo.branchmap().itervalues():
        headrevs.update(cl.rev(h) for h in branchheads)
    return subset & baseset(headrevs)
1131
1131
@predicate('heads(set)', safe=True)
def heads(repo, subset, x):
    """Members of set with no children in set.
    """
    # A head of the set is a member that is not a parent of any other
    # member, so subtract the parents from the set itself.
    members = getset(repo, subset, x)
    parentrevs = parents(repo, subset, x)
    return members - parentrevs
1139
1139
@predicate('hidden()', safe=True)
def hidden(repo, subset, x):
    """Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    # The revisions filtered out of the 'visible' view are exactly the
    # hidden ones.
    return subset & repoview.filterrevs(repo, 'visible')
1148
1148
@predicate('keyword(string)', safe=True, weight=10)
def keyword(repo, subset, x):
    """Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.

    For a regular expression or case sensitive search of these fields, use
    ``grep(regex)``.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        # Case-insensitive substring match against the changed file
        # names, the user name and the commit description.
        ctx = repo[r]
        fields = ctx.files() + [ctx.user(), ctx.description()]
        return any(kw in encoding.lower(t) for t in fields)

    return subset.filter(matches, condrepr=('<keyword %r>', kw))
1166
1166
@predicate('limit(set[, n[, offset]])', safe=True, takeorder=True, weight=0)
def limit(repo, subset, x, order):
    """First n members of set, defaulting to 1, starting from offset.
    """
    args = getargsdict(x, 'limit', 'set n offset')
    if 'set' not in args:
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit requires one to three arguments"))
    # i18n: "limit" is a keyword
    lim = getinteger(args.get('n'), _("limit expects a number"), default=1)
    if lim < 0:
        raise error.ParseError(_("negative number to select"))
    # i18n: "limit" is a keyword
    ofs = getinteger(args.get('offset'), _("limit expects a number"), default=0)
    if ofs < 0:
        raise error.ParseError(_("negative offset"))
    revs = getset(repo, fullreposet(repo), args['set'])
    window = revs.slice(ofs, ofs + lim)
    if order == followorder and lim > 1:
        # Under 'follow' ordering, let the subset dictate iteration order.
        return subset & window
    return window & subset
1188
1188
@predicate('last(set, [n])', safe=True, takeorder=True)
def last(repo, subset, x, order):
    """Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    args = getargs(x, 1, 2, _("last requires one or two arguments"))
    lim = 1
    if len(args) == 2:
        # i18n: "last" is a keyword
        lim = getinteger(args[1], _("last expects a number"))
    if lim < 0:
        raise error.ParseError(_("negative number to select"))
    # Reverse, take the first lim members, then restore the order.
    revs = getset(repo, fullreposet(repo), args[0])
    revs.reverse()
    tail = revs.slice(0, lim)
    if order == followorder and lim > 1:
        # Under 'follow' ordering, let the subset dictate iteration order.
        return subset & tail
    tail.reverse()
    return tail & subset
1208
1208
@predicate('max(set)', safe=True)
def maxrev(repo, subset, x):
    """Changeset with highest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.max()
    except ValueError:
        # os.max() throws a ValueError when the collection is empty.
        # Same as python's max().
        pass
    else:
        if m in subset:
            return baseset([m], datarepr=('<max %r, %r>', subset, os))
    return baseset(datarepr=('<max %r, %r>', subset, os))
1223
1223
@predicate('merge()', safe=True)
def merge(repo, subset, x):
    """Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    parentrevs = repo.changelog.parentrevs
    # A merge has a real (non-null, i.e. != -1) second parent.
    return subset.filter(lambda r: parentrevs(r)[1] != -1,
                         condrepr='<merge>')
1233
1233
@predicate('branchpoint()', safe=True)
def branchpoint(repo, subset, x):
    """Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    # childcount[r - baserev] is the number of children of revision r,
    # computed with one scan over the parents of every later revision.
    childcount = [0] * (len(repo) - baserev)
    for rev in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(rev):
            if p >= baserev:
                childcount[p - baserev] += 1
    return subset.filter(lambda r: childcount[r - baserev] > 1,
                         condrepr='<branchpoint>')
1253
1253
@predicate('min(set)', safe=True)
def minrev(repo, subset, x):
    """Changeset with lowest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.min()
    except ValueError:
        # os.min() throws a ValueError when the collection is empty.
        # Same as python's min().
        pass
    else:
        if m in subset:
            return baseset([m], datarepr=('<min %r, %r>', subset, os))
    return baseset(datarepr=('<min %r, %r>', subset, os))
1268
1268
@predicate('modifies(pattern)', safe=True, weight=30)
def modifies(repo, subset, x):
    """Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    # Status field 0 selects the "modified" files.
    return checkstatus(repo, subset, pat, 0)
1280
1280
@predicate('named(namespace)')
def named(repo, subset, x):
    """The changesets in a given namespace.

    Pattern matching is supported for `namespace`. See
    :hg:`help revisions.patterns`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = stringutil.stringmatcher(ns)
    # Resolve the argument to a set of namespace objects, either by exact
    # name or by pattern match.
    namespaces = set()
    if kind == 'literal':
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        for nsname, nsobj in repo.names.iteritems():
            if matcher(nsname):
                namespaces.add(nsobj)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    # Gather the revisions of every non-deprecated name in the selected
    # namespaces.
    revs = set()
    for nsobj in namespaces:
        for name in nsobj.listnames(repo):
            if name not in nsobj.deprecated:
                revs.update(repo[n].rev()
                            for n in nsobj.nodes(repo, name))

    revs -= {node.nullrev}
    return subset & revs
1317
1317
@predicate('id(string)', safe=True)
def node_(repo, subset, x):
    """Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    args = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(args[0], _("id requires a string"))
    if len(n) == 40:
        # Full 40-hex-digit hash: resolve it directly.
        try:
            rn = repo.changelog.rev(node.bin(n))
        except error.WdirUnsupported:
            rn = node.wdirrev
        except (LookupError, TypeError):
            rn = None
    else:
        rn = None
    # Also try an unambiguous-prefix match; a hit overrides the result
    # of the direct lookup above.
    try:
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)
    except error.WdirUnsupported:
        rn = node.wdirrev

    if rn is None:
        return baseset()
    return baseset([rn]) & subset
1346
1346
@predicate('obsolete()', safe=True)
def obsolete(repo, subset, x):
    """Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    return subset & obsmod.getrevs(repo, 'obsolete')
1354
1354
@predicate('only(set, [set])', safe=True)
def only(repo, subset, x):
    """Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        # With a single argument, exclude every repository head that is
        # neither in the included set nor a descendant of it.
        descendants = set(dagop.revdescendants(repo, include, False))
        # PEP 8 idiom fix: 'rev not in s' instead of 'not rev in s'
        # (same semantics, clearer reading).
        exclude = [rev for rev in cl.headrevs()
                   if rev not in descendants and rev not in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & results
1380
1380
@predicate('origin([set])', safe=True)
def origin(repo, subset, x):
    """
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, fullreposet(repo), x)
    else:
        dests = fullreposet(repo)

    def _firstsrc(rev):
        # Follow the recorded source chain all the way back to the
        # changeset that started it, or None if rev has no source.
        src = _getrevsource(repo, rev)
        if src is None:
            return None
        prev = _getrevsource(repo, src)
        while prev is not None:
            src = prev
            prev = _getrevsource(repo, src)
        return src

    srcs = {_firstsrc(r) for r in dests}
    srcs -= {None}
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & srcs
1412
1412
@predicate('outgoing([path])', safe=False, weight=10)
def outgoing(repo, subset, x):
    """Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    args = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = (args
            and getstring(args[0], _("outgoing requires a repository path"))
            or '')
    if not dest:
        # ui.paths.getpath() explicitly tests for None, not just a boolean
        dest = None
    path = repo.ui.paths.getpath(dest, default=('default-push', 'default'))
    if not path:
        raise error.Abort(_('default repository not configured!'),
                          hint=_("see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = path.branch, []

    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # Silence the discovery chatter while probing the remote.
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    missingrevs = {cl.rev(n) for n in outgoing.missing}
    return subset & missingrevs
1447
1447
@predicate('p1([set])', safe=True)
def p1(repo, subset, x):
    """First parent of changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context.
        p = repo[x].p1().rev()
        if p < 0:
            return baseset()
        return subset & baseset([p])

    firstparents = set()
    cl = repo.changelog
    for rev in getset(repo, fullreposet(repo), x):
        try:
            firstparents.add(cl.parentrevs(rev)[0])
        except error.WdirUnsupported:
            # The changelog cannot answer for the working directory;
            # fall back to the (more expensive) context object.
            firstparents.add(repo[rev].parents()[0].rev())
    firstparents -= {node.nullrev}
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & firstparents
1469
1469
@predicate('p2([set])', safe=True)
def p2(repo, subset, x):
    """Second parent of changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context.
        ps = repo[x].parents()
        try:
            p = ps[1].rev()
            if p >= 0:
                return subset & baseset([p])
            return baseset()
        except IndexError:
            # The working directory has a single parent: no p2.
            return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        try:
            ps.add(cl.parentrevs(r)[1])
        except error.WdirUnsupported:
            parents = repo[r].parents()
            if len(parents) == 2:
                # Fix: add the revision *number*, not the changectx
                # object. The old code added parents[1] itself, so the
                # wdir second parent could never intersect 'subset'
                # (which contains ints). Matches p1() and parentspec().
                ps.add(parents[1].rev())
    ps -= {node.nullrev}
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & ps
1497
1497
def parentpost(repo, subset, x, order):
    """Handle the ``set^`` postfix operator: first parent of every rev."""
    return p1(repo, subset, x)
1500
1500
@predicate('parents([set])', safe=True)
def parents(repo, subset, x):
    """
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context.
        allparents = set(p.rev() for p in repo[x].parents())
    else:
        allparents = set()
        cl = repo.changelog
        # Bind the hot calls to locals for the loop below.
        update = allparents.update
        parentrevs = cl.parentrevs
        for rev in getset(repo, fullreposet(repo), x):
            try:
                update(parentrevs(rev))
            except error.WdirUnsupported:
                # The changelog cannot answer for the working directory.
                update(p.rev() for p in repo[rev].parents())
    allparents -= {node.nullrev}
    return subset & allparents
1520
1520
1521 def _phase(repo, subset, *targets):
1521 def _phase(repo, subset, *targets):
1522 """helper to select all rev in <targets> phases"""
1522 """helper to select all rev in <targets> phases"""
1523 return repo._phasecache.getrevset(repo, targets, subset)
1523 return repo._phasecache.getrevset(repo, targets, subset)
1524
1524
@predicate('draft()', safe=True)
def draft(repo, subset, x):
    """Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    return _phase(repo, subset, phases.draft)
1532
1532
@predicate('secret()', safe=True)
def secret(repo, subset, x):
    """Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    return _phase(repo, subset, phases.secret)
1540
1540
@predicate('stack([revs])', safe=True)
def stack(repo, subset, x):
    """Experimental revset for the stack of changesets or working directory
    parent. (EXPERIMENTAL)
    """
    if x is None:
        # No argument: the stack of the working directory parent.
        allstacks = stackmod.getstack(repo, x)
    else:
        # Union the stack of every revision in the argument set.
        allstacks = smartset.baseset([])
        for rev in getset(repo, fullreposet(repo), x):
            allstacks = allstacks + stackmod.getstack(repo, rev)

    return subset & allstacks
1555
1555
def parentspec(repo, subset, x, n, order):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    selected = set()
    cl = repo.changelog
    for rev in getset(repo, fullreposet(repo), x):
        if n == 0:
            # ^0 is the revision itself.
            selected.add(rev)
        elif n == 1:
            try:
                selected.add(cl.parentrevs(rev)[0])
            except error.WdirUnsupported:
                # The changelog cannot answer for the working directory.
                selected.add(repo[rev].parents()[0].rev())
        else:
            try:
                prevs = cl.parentrevs(rev)
                if prevs[1] != node.nullrev:
                    selected.add(prevs[1])
            except error.WdirUnsupported:
                prevs = repo[rev].parents()
                if len(prevs) == 2:
                    selected.add(prevs[1].rev())
    return subset & selected
1588
1588
@predicate('present(set)', safe=True, takeorder=True)
def present(repo, subset, x, order):
    """An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x, order)
    except error.RepoLookupError:
        # Unknown revisions simply yield an empty set instead of aborting.
        return baseset()
1602
1602
# for internal use
@predicate('_notpublic', safe=True)
def _notpublic(repo, subset, x):
    getargs(x, 0, 0, "_notpublic takes no arguments")
    # draft and secret together are exactly the non-public phases
    return _phase(repo, subset, phases.draft, phases.secret)
1608
1608
# for internal use
@predicate('_phaseandancestors(phasename, set)', safe=True)
def _phaseandancestors(repo, subset, x):
    # equivalent to (phasename() & ancestors(set)) but more efficient
    # phasename could be one of 'draft', 'secret', or '_notpublic'
    args = getargs(x, 2, 2, "_phaseandancestors requires two arguments")
    phasename = getsymbol(args[0])
    heads = getset(repo, fullreposet(repo), args[1])

    draft = phases.draft
    secret = phases.secret
    # minimal phase an ancestor must have for the walk to continue
    phasenamemap = {
        '_notpublic': draft,
        'draft': draft, # follow secret's ancestors
        'secret': secret,
    }
    if phasename not in phasenamemap:
        raise error.ParseError('%r is not a valid phasename' % phasename)

    minimalphase = phasenamemap[phasename]
    getphase = repo._phasecache.phase

    def cutfunc(rev):
        # stop descending once the phase drops below the threshold
        return getphase(repo, rev) < minimalphase

    revs = dagop.revancestors(repo, heads, cutfunc=cutfunc)

    if phasename == 'draft': # need to remove secret changesets
        revs = revs.filter(lambda r: getphase(repo, r) == draft)
    return subset & revs
1639
1639
@predicate('public()', safe=True)
def public(repo, subset, x):
    """Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    # delegate to the shared phase filter
    return _phase(repo, subset, phases.public)
1646
1646
@predicate('remote([id [,path]])', safe=False)
def remote(repo, subset, x):
    """Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    args = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))

    q = '.'
    if len(args) > 0:
        # i18n: "remote" is a keyword
        q = getstring(args[0], _("remote requires a string id"))
    if q == '.':
        # '.' stands for the current local branch, looked up remotely
        q = repo['.'].branch()

    dest = ''
    if len(args) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(args[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    n = other.lookup(q)
    # only yield the revision if it is known locally and in the subset
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
1681
1681
@predicate('removes(pattern)', safe=True, weight=30)
def removes(repo, subset, x):
    """Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _("removes requires a pattern"))
    # status field index 2 selects removed files
    return checkstatus(repo, subset, pat, 2)
1693
1693
@predicate('rev(number)', safe=True)
def rev(repo, subset, x):
    """Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    args = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        n = int(getstring(args[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    # the null and working-directory revisions are valid even though they
    # have no changelog entry
    if n not in repo.changelog and n not in (node.nullrev, node.wdirrev):
        return baseset()
    return subset & baseset([n])
1709
1709
@predicate('matching(revision [, field])', safe=True)
def matching(repo, subset, x):
    """Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
                  'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True),)
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        for rev in revs:
            target = getinfo(rev)
            # Short-circuit on the first mismatching field. Fields were
            # sorted by increasing matching cost above, so expensive
            # fields such as 'diff' are only evaluated when every cheaper
            # field already matched. (The previous implementation kept
            # comparing — and computing — the remaining fields even after
            # a mismatch was found.)
            if all(t == f(x) for t, f in zip(target, getfieldfuncs)):
                return True
        return False

    return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1821
1821
@predicate('reverse(set)', safe=True, takeorder=True, weight=0)
def reverse(repo, subset, x, order):
    """Reverse order of set.
    """
    revs = getset(repo, subset, x, order)
    # only reverse when we are the ones defining the order
    if order == defineorder:
        revs.reverse()
    return revs
1830
1830
@predicate('roots(set)', safe=True)
def roots(repo, subset, x):
    """Changesets in set with no parent changeset in set.
    """
    s = getset(repo, fullreposet(repo), x)
    parents = repo.changelog.parentrevs
    def filter(r):
        # a root has no non-null parent that also belongs to the set
        return not any(0 <= p and p in s for p in parents(r))
    return subset & s.filter(filter, condrepr='<roots>')
1843
1843
# map of sort key name -> function extracting that key from a changectx;
# 'author' is accepted as an alias for 'user'
_sortkeyfuncs = {
    'rev': lambda c: c.rev(),
    'branch': lambda c: c.branch(),
    'desc': lambda c: c.description(),
    'user': lambda c: c.user(),
    'author': lambda c: c.user(),
    'date': lambda c: c.date()[0],
}
1852
1852
def _getsortargs(x):
    """Parse sort options into (set, [(key, reverse)], opts)"""
    args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
    if 'set' not in args:
        # i18n: "sort" is a keyword
        raise error.ParseError(_('sort requires one or two arguments'))
    keys = "rev"
    if 'keys' in args:
        # i18n: "sort" is a keyword
        keys = getstring(args['keys'], _("sort spec must be a string"))

    # parse each whitespace-separated key, stripping a leading '-'
    # (descending) marker
    keyflags = []
    for rawkey in keys.split():
        descending = rawkey.startswith('-')
        key = rawkey[1:] if descending else rawkey
        if key not in _sortkeyfuncs and key != 'topo':
            raise error.ParseError(
                _("unknown sort key %r") % pycompat.bytestr(rawkey))
        keyflags.append((key, descending))

    if len(keyflags) > 1 and any(k == 'topo' for k, descending in keyflags):
        # i18n: "topo" is a keyword
        raise error.ParseError(_('topo sort order cannot be combined '
                                 'with other sort keys'))

    opts = {}
    if 'topo.firstbranch' in args:
        if any(k == 'topo' for k, descending in keyflags):
            opts['topo.firstbranch'] = args['topo.firstbranch']
        else:
            # i18n: "topo" and "topo.firstbranch" are keywords
            raise error.ParseError(_('topo.firstbranch can only be used '
                                     'when using the topo sort key'))

    return args['set'], keyflags, opts
1890
1890
@predicate('sort(set[, [-]key... [, ...]])', safe=True, takeorder=True,
           weight=10)
def sort(repo, subset, x, order):
    """Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    - ``topo`` for a reverse topographical sort

    The ``topo`` sort order cannot be combined with other sort keys. This sort
    takes one optional argument, ``topo.firstbranch``, which takes a revset that
    specifies what topographical branches to prioritize in the sort.

    """
    s, keyflags, opts = _getsortargs(x)
    revs = getset(repo, subset, s, order)

    # nothing to sort when no keys were given, or when the caller imposes
    # its own ordering
    if not keyflags or order != defineorder:
        return revs
    if len(keyflags) == 1 and keyflags[0][0] == "rev":
        # fast path: smartsets sort themselves by revision number
        revs.sort(reverse=keyflags[0][1])
        return revs
    if keyflags[0][0] == "topo":
        firstbranch = ()
        if 'topo.firstbranch' in opts:
            firstbranch = getset(repo, subset, opts['topo.firstbranch'])
        revs = baseset(dagop.toposort(revs, repo.changelog.parentrevs,
                                      firstbranch),
                       istopo=True)
        if keyflags[0][1]:
            revs.reverse()
        return revs

    # sort() is guaranteed to be stable, so sorting by each key from the
    # least significant one backwards yields the requested ordering
    ctxs = [repo[r] for r in revs]
    for k, reverse in reversed(keyflags):
        ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
    return baseset([c.rev() for c in ctxs])
1935
1935
@predicate('subrepo([pattern])')
def subrepo(repo, subset, x):
    """Changesets that add, modify or remove the given subrepo. If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    pat = None
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    # only changes to .hgsubstate can alter subrepo state
    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        k, p, matchfn = stringutil.stringmatcher(pat)
        return (name for name in names if matchfn(name))

    def matches(x):
        c = repo[x]
        s = repo.status(c.p1().node(), c.node(), match=m)

        if pat is None:
            # any .hgsubstate change at all qualifies
            return s.added or s.modified or s.removed

        if s.added:
            return any(submatches(c.substate.keys()))

        if s.modified:
            # a matching subrepo counts only when its recorded state
            # actually differs between p1 and this revision
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())
            if any(c.p1().substate.get(path) != c.substate.get(path)
                   for path in submatches(subs)):
                return True

        if s.removed:
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches, condrepr=('<subrepo %r>', pat))
1979
1979
def _mapbynodefunc(repo, s, f):
    """(repo, smartset, [node] -> [node]) -> smartset

    Helper method to map a smartset to another smartset given a function only
    talking about nodes. Handles converting between rev numbers and nodes, and
    filtering.
    """
    # work on the unfiltered changelog so f may return hidden nodes ...
    cl = repo.unfiltered().changelog
    torev = cl.rev
    tonode = cl.node
    nodemap = cl.nodemap
    result = {torev(n) for n in f(tonode(r) for r in s) if n in nodemap}
    # ... then drop the filtered revisions again before returning
    return smartset.baseset(result - repo.changelog.filteredrevs)
1993
1993
@predicate('successors(set)', safe=True)
def successors(repo, subset, x):
    """All successors for set, including the given set themselves"""
    s = getset(repo, fullreposet(repo), x)
    def allsucc(nodes):
        # node-level successor computation from the obsolescence store
        return obsutil.allsuccessors(repo.obsstore, nodes)
    d = _mapbynodefunc(repo, s, allsucc)
    return subset & d
2001
2001
def _substringmatcher(pattern, casesensitive=True):
    # like stringutil.stringmatcher, but 'literal' patterns match as
    # substrings (optionally case-insensitively) instead of exactly
    kind, pattern, matcher = stringutil.stringmatcher(
        pattern, casesensitive=casesensitive)
    if kind == 'literal':
        if casesensitive:
            matcher = lambda s: pattern in s
        else:
            pattern = encoding.lower(pattern)
            matcher = lambda s: pattern in encoding.lower(s)
    return kind, pattern, matcher
2012
2012
@predicate('tag([name])', safe=True)
def tag(repo, subset, x):
    """The specified tag by name, or all tagged revisions if no name is given.

    Pattern matching is supported for `name`. See
    :hg:`help revisions.patterns`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if not args:
        # every tagged revision; 'tip' is implicit and excluded
        tagged = {cl.rev(n) for t, n in repo.tagslist() if t != 'tip'}
        return subset & tagged
    pattern = getstring(args[0],
                        # i18n: "tag" is a keyword
                        _('the argument to tag must be a string'))
    kind, pattern, matcher = stringutil.stringmatcher(pattern)
    if kind == 'literal':
        # avoid resolving all tags
        tn = repo._tagscache.tags.get(pattern, None)
        if tn is None:
            raise error.RepoLookupError(_("tag '%s' does not exist")
                                        % pattern)
        tagged = {repo[tn].rev()}
    else:
        tagged = {cl.rev(n) for t, n in repo.tagslist() if matcher(t)}
    return subset & tagged
2040
2040
@predicate('tagged', safe=True)
def tagged(repo, subset, x):
    # legacy alias for tag(); intentionally left without a docstring so it
    # does not show up in the generated help
    return tag(repo, subset, x)
2044
2044
2045 @predicate('orphan()', safe=True)
2045 @predicate('orphan()', safe=True)
2046 def orphan(repo, subset, x):
2046 def orphan(repo, subset, x):
2047 """Non-obsolete changesets with obsolete ancestors. (EXPERIMENTAL)
2047 """Non-obsolete changesets with obsolete ancestors. (EXPERIMENTAL)
2048 """
2048 """
2049 # i18n: "orphan" is a keyword
2049 # i18n: "orphan" is a keyword
2050 getargs(x, 0, 0, _("orphan takes no arguments"))
2050 getargs(x, 0, 0, _("orphan takes no arguments"))
2051 orphan = obsmod.getrevs(repo, 'orphan')
2051 orphan = obsmod.getrevs(repo, 'orphan')
2052 return subset & orphan
2052 return subset & orphan
2053
2053
2054
2054
2055 @predicate('user(string)', safe=True, weight=10)
2055 @predicate('user(string)', safe=True, weight=10)
2056 def user(repo, subset, x):
2056 def user(repo, subset, x):
2057 """User name contains string. The match is case-insensitive.
2057 """User name contains string. The match is case-insensitive.
2058
2058
2059 Pattern matching is supported for `string`. See
2059 Pattern matching is supported for `string`. See
2060 :hg:`help revisions.patterns`.
2060 :hg:`help revisions.patterns`.
2061 """
2061 """
2062 return author(repo, subset, x)
2062 return author(repo, subset, x)
2063
2063
2064 @predicate('wdir()', safe=True, weight=0)
2064 @predicate('wdir()', safe=True, weight=0)
2065 def wdir(repo, subset, x):
2065 def wdir(repo, subset, x):
2066 """Working directory. (EXPERIMENTAL)"""
2066 """Working directory. (EXPERIMENTAL)"""
2067 # i18n: "wdir" is a keyword
2067 # i18n: "wdir" is a keyword
2068 getargs(x, 0, 0, _("wdir takes no arguments"))
2068 getargs(x, 0, 0, _("wdir takes no arguments"))
2069 if node.wdirrev in subset or isinstance(subset, fullreposet):
2069 if node.wdirrev in subset or isinstance(subset, fullreposet):
2070 return baseset([node.wdirrev])
2070 return baseset([node.wdirrev])
2071 return baseset()
2071 return baseset()
2072
2072
2073 def _orderedlist(repo, subset, x):
2073 def _orderedlist(repo, subset, x):
2074 s = getstring(x, "internal error")
2074 s = getstring(x, "internal error")
2075 if not s:
2075 if not s:
2076 return baseset()
2076 return baseset()
2077 # remove duplicates here. it's difficult for caller to deduplicate sets
2077 # remove duplicates here. it's difficult for caller to deduplicate sets
2078 # because different symbols can point to the same rev.
2078 # because different symbols can point to the same rev.
2079 cl = repo.changelog
2079 cl = repo.changelog
2080 ls = []
2080 ls = []
2081 seen = set()
2081 seen = set()
2082 for t in s.split('\0'):
2082 for t in s.split('\0'):
2083 try:
2083 try:
2084 # fast path for integer revision
2084 # fast path for integer revision
2085 r = int(t)
2085 r = int(t)
2086 if ('%d' % r) != t or r not in cl:
2086 if ('%d' % r) != t or r not in cl:
2087 raise ValueError
2087 raise ValueError
2088 revs = [r]
2088 revs = [r]
2089 except ValueError:
2089 except ValueError:
2090 revs = stringset(repo, subset, t, defineorder)
2090 revs = stringset(repo, subset, t, defineorder)
2091
2091
2092 for r in revs:
2092 for r in revs:
2093 if r in seen:
2093 if r in seen:
2094 continue
2094 continue
2095 if (r in subset
2095 if (r in subset
2096 or r == node.nullrev and isinstance(subset, fullreposet)):
2096 or r == node.nullrev and isinstance(subset, fullreposet)):
2097 ls.append(r)
2097 ls.append(r)
2098 seen.add(r)
2098 seen.add(r)
2099 return baseset(ls)
2099 return baseset(ls)
2100
2100
2101 # for internal use
2101 # for internal use
2102 @predicate('_list', safe=True, takeorder=True)
2102 @predicate('_list', safe=True, takeorder=True)
2103 def _list(repo, subset, x, order):
2103 def _list(repo, subset, x, order):
2104 if order == followorder:
2104 if order == followorder:
2105 # slow path to take the subset order
2105 # slow path to take the subset order
2106 return subset & _orderedlist(repo, fullreposet(repo), x)
2106 return subset & _orderedlist(repo, fullreposet(repo), x)
2107 else:
2107 else:
2108 return _orderedlist(repo, subset, x)
2108 return _orderedlist(repo, subset, x)
2109
2109
2110 def _orderedintlist(repo, subset, x):
2110 def _orderedintlist(repo, subset, x):
2111 s = getstring(x, "internal error")
2111 s = getstring(x, "internal error")
2112 if not s:
2112 if not s:
2113 return baseset()
2113 return baseset()
2114 ls = [int(r) for r in s.split('\0')]
2114 ls = [int(r) for r in s.split('\0')]
2115 s = subset
2115 s = subset
2116 return baseset([r for r in ls if r in s])
2116 return baseset([r for r in ls if r in s])
2117
2117
2118 # for internal use
2118 # for internal use
2119 @predicate('_intlist', safe=True, takeorder=True, weight=0)
2119 @predicate('_intlist', safe=True, takeorder=True, weight=0)
2120 def _intlist(repo, subset, x, order):
2120 def _intlist(repo, subset, x, order):
2121 if order == followorder:
2121 if order == followorder:
2122 # slow path to take the subset order
2122 # slow path to take the subset order
2123 return subset & _orderedintlist(repo, fullreposet(repo), x)
2123 return subset & _orderedintlist(repo, fullreposet(repo), x)
2124 else:
2124 else:
2125 return _orderedintlist(repo, subset, x)
2125 return _orderedintlist(repo, subset, x)
2126
2126
2127 def _orderedhexlist(repo, subset, x):
2127 def _orderedhexlist(repo, subset, x):
2128 s = getstring(x, "internal error")
2128 s = getstring(x, "internal error")
2129 if not s:
2129 if not s:
2130 return baseset()
2130 return baseset()
2131 cl = repo.changelog
2131 cl = repo.changelog
2132 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2132 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2133 s = subset
2133 s = subset
2134 return baseset([r for r in ls if r in s])
2134 return baseset([r for r in ls if r in s])
2135
2135
2136 # for internal use
2136 # for internal use
2137 @predicate('_hexlist', safe=True, takeorder=True)
2137 @predicate('_hexlist', safe=True, takeorder=True)
2138 def _hexlist(repo, subset, x, order):
2138 def _hexlist(repo, subset, x, order):
2139 if order == followorder:
2139 if order == followorder:
2140 # slow path to take the subset order
2140 # slow path to take the subset order
2141 return subset & _orderedhexlist(repo, fullreposet(repo), x)
2141 return subset & _orderedhexlist(repo, fullreposet(repo), x)
2142 else:
2142 else:
2143 return _orderedhexlist(repo, subset, x)
2143 return _orderedhexlist(repo, subset, x)
2144
2144
2145 methods = {
2145 methods = {
2146 "range": rangeset,
2146 "range": rangeset,
2147 "rangeall": rangeall,
2147 "rangeall": rangeall,
2148 "rangepre": rangepre,
2148 "rangepre": rangepre,
2149 "rangepost": rangepost,
2149 "rangepost": rangepost,
2150 "dagrange": dagrange,
2150 "dagrange": dagrange,
2151 "string": stringset,
2151 "string": stringset,
2152 "symbol": stringset,
2152 "symbol": stringset,
2153 "and": andset,
2153 "and": andset,
2154 "andsmally": andsmallyset,
2154 "andsmally": andsmallyset,
2155 "or": orset,
2155 "or": orset,
2156 "not": notset,
2156 "not": notset,
2157 "difference": differenceset,
2157 "difference": differenceset,
2158 "relation": relationset,
2158 "relation": relationset,
2159 "relsubscript": relsubscriptset,
2159 "relsubscript": relsubscriptset,
2160 "subscript": subscriptset,
2160 "subscript": subscriptset,
2161 "list": listset,
2161 "list": listset,
2162 "keyvalue": keyvaluepair,
2162 "keyvalue": keyvaluepair,
2163 "func": func,
2163 "func": func,
2164 "ancestor": ancestorspec,
2164 "ancestor": ancestorspec,
2165 "parent": parentspec,
2165 "parent": parentspec,
2166 "parentpost": parentpost,
2166 "parentpost": parentpost,
2167 }
2167 }
2168
2168
2169 def lookupfn(repo):
2169 def lookupfn(repo):
2170 return lambda symbol: scmutil.isrevsymbol(repo, symbol)
2170 return lambda symbol: scmutil.isrevsymbol(repo, symbol)
2171
2171
2172 def match(ui, spec, repo=None):
2172 def match(ui, spec, lookup=None):
2173 """Create a matcher for a single revision spec"""
2173 """Create a matcher for a single revision spec"""
2174 return matchany(ui, [spec], repo=repo)
2174 return matchany(ui, [spec], lookup=None)
2175
2175
2176 def matchany(ui, specs, repo=None, localalias=None):
2176 def matchany(ui, specs, lookup=None, localalias=None):
2177 """Create a matcher that will include any revisions matching one of the
2177 """Create a matcher that will include any revisions matching one of the
2178 given specs
2178 given specs
2179
2179
2180 If lookup function is not None, the parser will first attempt to handle
2181 old-style ranges, which may contain operator characters.
2182
2180 If localalias is not None, it is a dict {name: definitionstring}. It takes
2183 If localalias is not None, it is a dict {name: definitionstring}. It takes
2181 precedence over [revsetalias] config section.
2184 precedence over [revsetalias] config section.
2182 """
2185 """
2183 if not specs:
2186 if not specs:
2184 def mfunc(repo, subset=None):
2187 def mfunc(repo, subset=None):
2185 return baseset()
2188 return baseset()
2186 return mfunc
2189 return mfunc
2187 if not all(specs):
2190 if not all(specs):
2188 raise error.ParseError(_("empty query"))
2191 raise error.ParseError(_("empty query"))
2189 lookup = None
2190 if repo:
2191 lookup = lookupfn(repo)
2192 if len(specs) == 1:
2192 if len(specs) == 1:
2193 tree = revsetlang.parse(specs[0], lookup)
2193 tree = revsetlang.parse(specs[0], lookup)
2194 else:
2194 else:
2195 tree = ('or',
2195 tree = ('or',
2196 ('list',) + tuple(revsetlang.parse(s, lookup) for s in specs))
2196 ('list',) + tuple(revsetlang.parse(s, lookup) for s in specs))
2197
2197
2198 aliases = []
2198 aliases = []
2199 warn = None
2199 warn = None
2200 if ui:
2200 if ui:
2201 aliases.extend(ui.configitems('revsetalias'))
2201 aliases.extend(ui.configitems('revsetalias'))
2202 warn = ui.warn
2202 warn = ui.warn
2203 if localalias:
2203 if localalias:
2204 aliases.extend(localalias.items())
2204 aliases.extend(localalias.items())
2205 if aliases:
2205 if aliases:
2206 tree = revsetlang.expandaliases(tree, aliases, warn=warn)
2206 tree = revsetlang.expandaliases(tree, aliases, warn=warn)
2207 tree = revsetlang.foldconcat(tree)
2207 tree = revsetlang.foldconcat(tree)
2208 tree = revsetlang.analyze(tree)
2208 tree = revsetlang.analyze(tree)
2209 tree = revsetlang.optimize(tree)
2209 tree = revsetlang.optimize(tree)
2210 return makematcher(tree)
2210 return makematcher(tree)
2211
2211
2212 def makematcher(tree):
2212 def makematcher(tree):
2213 """Create a matcher from an evaluatable tree"""
2213 """Create a matcher from an evaluatable tree"""
2214 def mfunc(repo, subset=None, order=None):
2214 def mfunc(repo, subset=None, order=None):
2215 if order is None:
2215 if order is None:
2216 if subset is None:
2216 if subset is None:
2217 order = defineorder # 'x'
2217 order = defineorder # 'x'
2218 else:
2218 else:
2219 order = followorder # 'subset & x'
2219 order = followorder # 'subset & x'
2220 if subset is None:
2220 if subset is None:
2221 subset = fullreposet(repo)
2221 subset = fullreposet(repo)
2222 return getset(repo, subset, tree, order)
2222 return getset(repo, subset, tree, order)
2223 return mfunc
2223 return mfunc
2224
2224
2225 def loadpredicate(ui, extname, registrarobj):
2225 def loadpredicate(ui, extname, registrarobj):
2226 """Load revset predicates from specified registrarobj
2226 """Load revset predicates from specified registrarobj
2227 """
2227 """
2228 for name, func in registrarobj._table.iteritems():
2228 for name, func in registrarobj._table.iteritems():
2229 symbols[name] = func
2229 symbols[name] = func
2230 if func._safe:
2230 if func._safe:
2231 safesymbols.add(name)
2231 safesymbols.add(name)
2232
2232
2233 # load built-in predicates explicitly to setup safesymbols
2233 # load built-in predicates explicitly to setup safesymbols
2234 loadpredicate(None, None, predicate)
2234 loadpredicate(None, None, predicate)
2235
2235
2236 # tell hggettext to extract docstrings from these functions:
2236 # tell hggettext to extract docstrings from these functions:
2237 i18nfunctions = symbols.values()
2237 i18nfunctions = symbols.values()
@@ -1,676 +1,676 b''
1 # templatefuncs.py - common template functions
1 # templatefuncs.py - common template functions
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import re
10 import re
11
11
12 from .i18n import _
12 from .i18n import _
13 from . import (
13 from . import (
14 color,
14 color,
15 encoding,
15 encoding,
16 error,
16 error,
17 minirst,
17 minirst,
18 obsutil,
18 obsutil,
19 pycompat,
19 pycompat,
20 registrar,
20 registrar,
21 revset as revsetmod,
21 revset as revsetmod,
22 revsetlang,
22 revsetlang,
23 scmutil,
23 scmutil,
24 templatefilters,
24 templatefilters,
25 templatekw,
25 templatekw,
26 templateutil,
26 templateutil,
27 util,
27 util,
28 )
28 )
29 from .utils import (
29 from .utils import (
30 dateutil,
30 dateutil,
31 stringutil,
31 stringutil,
32 )
32 )
33
33
34 evalrawexp = templateutil.evalrawexp
34 evalrawexp = templateutil.evalrawexp
35 evalfuncarg = templateutil.evalfuncarg
35 evalfuncarg = templateutil.evalfuncarg
36 evalboolean = templateutil.evalboolean
36 evalboolean = templateutil.evalboolean
37 evaldate = templateutil.evaldate
37 evaldate = templateutil.evaldate
38 evalinteger = templateutil.evalinteger
38 evalinteger = templateutil.evalinteger
39 evalstring = templateutil.evalstring
39 evalstring = templateutil.evalstring
40 evalstringliteral = templateutil.evalstringliteral
40 evalstringliteral = templateutil.evalstringliteral
41
41
42 # dict of template built-in functions
42 # dict of template built-in functions
43 funcs = {}
43 funcs = {}
44 templatefunc = registrar.templatefunc(funcs)
44 templatefunc = registrar.templatefunc(funcs)
45
45
46 @templatefunc('date(date[, fmt])')
46 @templatefunc('date(date[, fmt])')
47 def date(context, mapping, args):
47 def date(context, mapping, args):
48 """Format a date. See :hg:`help dates` for formatting
48 """Format a date. See :hg:`help dates` for formatting
49 strings. The default is a Unix date format, including the timezone:
49 strings. The default is a Unix date format, including the timezone:
50 "Mon Sep 04 15:13:13 2006 0700"."""
50 "Mon Sep 04 15:13:13 2006 0700"."""
51 if not (1 <= len(args) <= 2):
51 if not (1 <= len(args) <= 2):
52 # i18n: "date" is a keyword
52 # i18n: "date" is a keyword
53 raise error.ParseError(_("date expects one or two arguments"))
53 raise error.ParseError(_("date expects one or two arguments"))
54
54
55 date = evaldate(context, mapping, args[0],
55 date = evaldate(context, mapping, args[0],
56 # i18n: "date" is a keyword
56 # i18n: "date" is a keyword
57 _("date expects a date information"))
57 _("date expects a date information"))
58 fmt = None
58 fmt = None
59 if len(args) == 2:
59 if len(args) == 2:
60 fmt = evalstring(context, mapping, args[1])
60 fmt = evalstring(context, mapping, args[1])
61 if fmt is None:
61 if fmt is None:
62 return dateutil.datestr(date)
62 return dateutil.datestr(date)
63 else:
63 else:
64 return dateutil.datestr(date, fmt)
64 return dateutil.datestr(date, fmt)
65
65
66 @templatefunc('dict([[key=]value...])', argspec='*args **kwargs')
66 @templatefunc('dict([[key=]value...])', argspec='*args **kwargs')
67 def dict_(context, mapping, args):
67 def dict_(context, mapping, args):
68 """Construct a dict from key-value pairs. A key may be omitted if
68 """Construct a dict from key-value pairs. A key may be omitted if
69 a value expression can provide an unambiguous name."""
69 a value expression can provide an unambiguous name."""
70 data = util.sortdict()
70 data = util.sortdict()
71
71
72 for v in args['args']:
72 for v in args['args']:
73 k = templateutil.findsymbolicname(v)
73 k = templateutil.findsymbolicname(v)
74 if not k:
74 if not k:
75 raise error.ParseError(_('dict key cannot be inferred'))
75 raise error.ParseError(_('dict key cannot be inferred'))
76 if k in data or k in args['kwargs']:
76 if k in data or k in args['kwargs']:
77 raise error.ParseError(_("duplicated dict key '%s' inferred") % k)
77 raise error.ParseError(_("duplicated dict key '%s' inferred") % k)
78 data[k] = evalfuncarg(context, mapping, v)
78 data[k] = evalfuncarg(context, mapping, v)
79
79
80 data.update((k, evalfuncarg(context, mapping, v))
80 data.update((k, evalfuncarg(context, mapping, v))
81 for k, v in args['kwargs'].iteritems())
81 for k, v in args['kwargs'].iteritems())
82 return templateutil.hybriddict(data)
82 return templateutil.hybriddict(data)
83
83
84 @templatefunc('diff([includepattern [, excludepattern]])')
84 @templatefunc('diff([includepattern [, excludepattern]])')
85 def diff(context, mapping, args):
85 def diff(context, mapping, args):
86 """Show a diff, optionally
86 """Show a diff, optionally
87 specifying files to include or exclude."""
87 specifying files to include or exclude."""
88 if len(args) > 2:
88 if len(args) > 2:
89 # i18n: "diff" is a keyword
89 # i18n: "diff" is a keyword
90 raise error.ParseError(_("diff expects zero, one, or two arguments"))
90 raise error.ParseError(_("diff expects zero, one, or two arguments"))
91
91
92 def getpatterns(i):
92 def getpatterns(i):
93 if i < len(args):
93 if i < len(args):
94 s = evalstring(context, mapping, args[i]).strip()
94 s = evalstring(context, mapping, args[i]).strip()
95 if s:
95 if s:
96 return [s]
96 return [s]
97 return []
97 return []
98
98
99 ctx = context.resource(mapping, 'ctx')
99 ctx = context.resource(mapping, 'ctx')
100 chunks = ctx.diff(match=ctx.match([], getpatterns(0), getpatterns(1)))
100 chunks = ctx.diff(match=ctx.match([], getpatterns(0), getpatterns(1)))
101
101
102 return ''.join(chunks)
102 return ''.join(chunks)
103
103
104 @templatefunc('extdata(source)', argspec='source')
104 @templatefunc('extdata(source)', argspec='source')
105 def extdata(context, mapping, args):
105 def extdata(context, mapping, args):
106 """Show a text read from the specified extdata source. (EXPERIMENTAL)"""
106 """Show a text read from the specified extdata source. (EXPERIMENTAL)"""
107 if 'source' not in args:
107 if 'source' not in args:
108 # i18n: "extdata" is a keyword
108 # i18n: "extdata" is a keyword
109 raise error.ParseError(_('extdata expects one argument'))
109 raise error.ParseError(_('extdata expects one argument'))
110
110
111 source = evalstring(context, mapping, args['source'])
111 source = evalstring(context, mapping, args['source'])
112 cache = context.resource(mapping, 'cache').setdefault('extdata', {})
112 cache = context.resource(mapping, 'cache').setdefault('extdata', {})
113 ctx = context.resource(mapping, 'ctx')
113 ctx = context.resource(mapping, 'ctx')
114 if source in cache:
114 if source in cache:
115 data = cache[source]
115 data = cache[source]
116 else:
116 else:
117 data = cache[source] = scmutil.extdatasource(ctx.repo(), source)
117 data = cache[source] = scmutil.extdatasource(ctx.repo(), source)
118 return data.get(ctx.rev(), '')
118 return data.get(ctx.rev(), '')
119
119
120 @templatefunc('files(pattern)')
120 @templatefunc('files(pattern)')
121 def files(context, mapping, args):
121 def files(context, mapping, args):
122 """All files of the current changeset matching the pattern. See
122 """All files of the current changeset matching the pattern. See
123 :hg:`help patterns`."""
123 :hg:`help patterns`."""
124 if not len(args) == 1:
124 if not len(args) == 1:
125 # i18n: "files" is a keyword
125 # i18n: "files" is a keyword
126 raise error.ParseError(_("files expects one argument"))
126 raise error.ParseError(_("files expects one argument"))
127
127
128 raw = evalstring(context, mapping, args[0])
128 raw = evalstring(context, mapping, args[0])
129 ctx = context.resource(mapping, 'ctx')
129 ctx = context.resource(mapping, 'ctx')
130 m = ctx.match([raw])
130 m = ctx.match([raw])
131 files = list(ctx.matches(m))
131 files = list(ctx.matches(m))
132 return templateutil.compatlist(context, mapping, "file", files)
132 return templateutil.compatlist(context, mapping, "file", files)
133
133
134 @templatefunc('fill(text[, width[, initialident[, hangindent]]])')
134 @templatefunc('fill(text[, width[, initialident[, hangindent]]])')
135 def fill(context, mapping, args):
135 def fill(context, mapping, args):
136 """Fill many
136 """Fill many
137 paragraphs with optional indentation. See the "fill" filter."""
137 paragraphs with optional indentation. See the "fill" filter."""
138 if not (1 <= len(args) <= 4):
138 if not (1 <= len(args) <= 4):
139 # i18n: "fill" is a keyword
139 # i18n: "fill" is a keyword
140 raise error.ParseError(_("fill expects one to four arguments"))
140 raise error.ParseError(_("fill expects one to four arguments"))
141
141
142 text = evalstring(context, mapping, args[0])
142 text = evalstring(context, mapping, args[0])
143 width = 76
143 width = 76
144 initindent = ''
144 initindent = ''
145 hangindent = ''
145 hangindent = ''
146 if 2 <= len(args) <= 4:
146 if 2 <= len(args) <= 4:
147 width = evalinteger(context, mapping, args[1],
147 width = evalinteger(context, mapping, args[1],
148 # i18n: "fill" is a keyword
148 # i18n: "fill" is a keyword
149 _("fill expects an integer width"))
149 _("fill expects an integer width"))
150 try:
150 try:
151 initindent = evalstring(context, mapping, args[2])
151 initindent = evalstring(context, mapping, args[2])
152 hangindent = evalstring(context, mapping, args[3])
152 hangindent = evalstring(context, mapping, args[3])
153 except IndexError:
153 except IndexError:
154 pass
154 pass
155
155
156 return templatefilters.fill(text, width, initindent, hangindent)
156 return templatefilters.fill(text, width, initindent, hangindent)
157
157
158 @templatefunc('formatnode(node)')
158 @templatefunc('formatnode(node)')
159 def formatnode(context, mapping, args):
159 def formatnode(context, mapping, args):
160 """Obtain the preferred form of a changeset hash. (DEPRECATED)"""
160 """Obtain the preferred form of a changeset hash. (DEPRECATED)"""
161 if len(args) != 1:
161 if len(args) != 1:
162 # i18n: "formatnode" is a keyword
162 # i18n: "formatnode" is a keyword
163 raise error.ParseError(_("formatnode expects one argument"))
163 raise error.ParseError(_("formatnode expects one argument"))
164
164
165 ui = context.resource(mapping, 'ui')
165 ui = context.resource(mapping, 'ui')
166 node = evalstring(context, mapping, args[0])
166 node = evalstring(context, mapping, args[0])
167 if ui.debugflag:
167 if ui.debugflag:
168 return node
168 return node
169 return templatefilters.short(node)
169 return templatefilters.short(node)
170
170
171 @templatefunc('mailmap(author)')
171 @templatefunc('mailmap(author)')
172 def mailmap(context, mapping, args):
172 def mailmap(context, mapping, args):
173 """Return the author, updated according to the value
173 """Return the author, updated according to the value
174 set in the .mailmap file"""
174 set in the .mailmap file"""
175 if len(args) != 1:
175 if len(args) != 1:
176 raise error.ParseError(_("mailmap expects one argument"))
176 raise error.ParseError(_("mailmap expects one argument"))
177
177
178 author = evalstring(context, mapping, args[0])
178 author = evalstring(context, mapping, args[0])
179
179
180 cache = context.resource(mapping, 'cache')
180 cache = context.resource(mapping, 'cache')
181 repo = context.resource(mapping, 'repo')
181 repo = context.resource(mapping, 'repo')
182
182
183 if 'mailmap' not in cache:
183 if 'mailmap' not in cache:
184 data = repo.wvfs.tryread('.mailmap')
184 data = repo.wvfs.tryread('.mailmap')
185 cache['mailmap'] = stringutil.parsemailmap(data)
185 cache['mailmap'] = stringutil.parsemailmap(data)
186
186
187 return stringutil.mapname(cache['mailmap'], author)
187 return stringutil.mapname(cache['mailmap'], author)
188
188
189 @templatefunc('pad(text, width[, fillchar=\' \'[, left=False]])',
189 @templatefunc('pad(text, width[, fillchar=\' \'[, left=False]])',
190 argspec='text width fillchar left')
190 argspec='text width fillchar left')
191 def pad(context, mapping, args):
191 def pad(context, mapping, args):
192 """Pad text with a
192 """Pad text with a
193 fill character."""
193 fill character."""
194 if 'text' not in args or 'width' not in args:
194 if 'text' not in args or 'width' not in args:
195 # i18n: "pad" is a keyword
195 # i18n: "pad" is a keyword
196 raise error.ParseError(_("pad() expects two to four arguments"))
196 raise error.ParseError(_("pad() expects two to four arguments"))
197
197
198 width = evalinteger(context, mapping, args['width'],
198 width = evalinteger(context, mapping, args['width'],
199 # i18n: "pad" is a keyword
199 # i18n: "pad" is a keyword
200 _("pad() expects an integer width"))
200 _("pad() expects an integer width"))
201
201
202 text = evalstring(context, mapping, args['text'])
202 text = evalstring(context, mapping, args['text'])
203
203
204 left = False
204 left = False
205 fillchar = ' '
205 fillchar = ' '
206 if 'fillchar' in args:
206 if 'fillchar' in args:
207 fillchar = evalstring(context, mapping, args['fillchar'])
207 fillchar = evalstring(context, mapping, args['fillchar'])
208 if len(color.stripeffects(fillchar)) != 1:
208 if len(color.stripeffects(fillchar)) != 1:
209 # i18n: "pad" is a keyword
209 # i18n: "pad" is a keyword
210 raise error.ParseError(_("pad() expects a single fill character"))
210 raise error.ParseError(_("pad() expects a single fill character"))
211 if 'left' in args:
211 if 'left' in args:
212 left = evalboolean(context, mapping, args['left'])
212 left = evalboolean(context, mapping, args['left'])
213
213
214 fillwidth = width - encoding.colwidth(color.stripeffects(text))
214 fillwidth = width - encoding.colwidth(color.stripeffects(text))
215 if fillwidth <= 0:
215 if fillwidth <= 0:
216 return text
216 return text
217 if left:
217 if left:
218 return fillchar * fillwidth + text
218 return fillchar * fillwidth + text
219 else:
219 else:
220 return text + fillchar * fillwidth
220 return text + fillchar * fillwidth
221
221
222 @templatefunc('indent(text, indentchars[, firstline])')
222 @templatefunc('indent(text, indentchars[, firstline])')
223 def indent(context, mapping, args):
223 def indent(context, mapping, args):
224 """Indents all non-empty lines
224 """Indents all non-empty lines
225 with the characters given in the indentchars string. An optional
225 with the characters given in the indentchars string. An optional
226 third parameter will override the indent for the first line only
226 third parameter will override the indent for the first line only
227 if present."""
227 if present."""
228 if not (2 <= len(args) <= 3):
228 if not (2 <= len(args) <= 3):
229 # i18n: "indent" is a keyword
229 # i18n: "indent" is a keyword
230 raise error.ParseError(_("indent() expects two or three arguments"))
230 raise error.ParseError(_("indent() expects two or three arguments"))
231
231
232 text = evalstring(context, mapping, args[0])
232 text = evalstring(context, mapping, args[0])
233 indent = evalstring(context, mapping, args[1])
233 indent = evalstring(context, mapping, args[1])
234
234
235 if len(args) == 3:
235 if len(args) == 3:
236 firstline = evalstring(context, mapping, args[2])
236 firstline = evalstring(context, mapping, args[2])
237 else:
237 else:
238 firstline = indent
238 firstline = indent
239
239
240 # the indent function doesn't indent the first line, so we do it here
240 # the indent function doesn't indent the first line, so we do it here
241 return templatefilters.indent(firstline + text, indent)
241 return templatefilters.indent(firstline + text, indent)
242
242
243 @templatefunc('get(dict, key)')
243 @templatefunc('get(dict, key)')
244 def get(context, mapping, args):
244 def get(context, mapping, args):
245 """Get an attribute/key from an object. Some keywords
245 """Get an attribute/key from an object. Some keywords
246 are complex types. This function allows you to obtain the value of an
246 are complex types. This function allows you to obtain the value of an
247 attribute on these types."""
247 attribute on these types."""
248 if len(args) != 2:
248 if len(args) != 2:
249 # i18n: "get" is a keyword
249 # i18n: "get" is a keyword
250 raise error.ParseError(_("get() expects two arguments"))
250 raise error.ParseError(_("get() expects two arguments"))
251
251
252 dictarg = evalfuncarg(context, mapping, args[0])
252 dictarg = evalfuncarg(context, mapping, args[0])
253 if not util.safehasattr(dictarg, 'get'):
253 if not util.safehasattr(dictarg, 'get'):
254 # i18n: "get" is a keyword
254 # i18n: "get" is a keyword
255 raise error.ParseError(_("get() expects a dict as first argument"))
255 raise error.ParseError(_("get() expects a dict as first argument"))
256
256
257 key = evalfuncarg(context, mapping, args[1])
257 key = evalfuncarg(context, mapping, args[1])
258 return templateutil.getdictitem(dictarg, key)
258 return templateutil.getdictitem(dictarg, key)
259
259
260 @templatefunc('if(expr, then[, else])')
260 @templatefunc('if(expr, then[, else])')
261 def if_(context, mapping, args):
261 def if_(context, mapping, args):
262 """Conditionally execute based on the result of
262 """Conditionally execute based on the result of
263 an expression."""
263 an expression."""
264 if not (2 <= len(args) <= 3):
264 if not (2 <= len(args) <= 3):
265 # i18n: "if" is a keyword
265 # i18n: "if" is a keyword
266 raise error.ParseError(_("if expects two or three arguments"))
266 raise error.ParseError(_("if expects two or three arguments"))
267
267
268 test = evalboolean(context, mapping, args[0])
268 test = evalboolean(context, mapping, args[0])
269 if test:
269 if test:
270 return evalrawexp(context, mapping, args[1])
270 return evalrawexp(context, mapping, args[1])
271 elif len(args) == 3:
271 elif len(args) == 3:
272 return evalrawexp(context, mapping, args[2])
272 return evalrawexp(context, mapping, args[2])
273
273
274 @templatefunc('ifcontains(needle, haystack, then[, else])')
274 @templatefunc('ifcontains(needle, haystack, then[, else])')
275 def ifcontains(context, mapping, args):
275 def ifcontains(context, mapping, args):
276 """Conditionally execute based
276 """Conditionally execute based
277 on whether the item "needle" is in "haystack"."""
277 on whether the item "needle" is in "haystack"."""
278 if not (3 <= len(args) <= 4):
278 if not (3 <= len(args) <= 4):
279 # i18n: "ifcontains" is a keyword
279 # i18n: "ifcontains" is a keyword
280 raise error.ParseError(_("ifcontains expects three or four arguments"))
280 raise error.ParseError(_("ifcontains expects three or four arguments"))
281
281
282 haystack = evalfuncarg(context, mapping, args[1])
282 haystack = evalfuncarg(context, mapping, args[1])
283 keytype = getattr(haystack, 'keytype', None)
283 keytype = getattr(haystack, 'keytype', None)
284 try:
284 try:
285 needle = evalrawexp(context, mapping, args[0])
285 needle = evalrawexp(context, mapping, args[0])
286 needle = templateutil.unwrapastype(context, mapping, needle,
286 needle = templateutil.unwrapastype(context, mapping, needle,
287 keytype or bytes)
287 keytype or bytes)
288 found = (needle in haystack)
288 found = (needle in haystack)
289 except error.ParseError:
289 except error.ParseError:
290 found = False
290 found = False
291
291
292 if found:
292 if found:
293 return evalrawexp(context, mapping, args[2])
293 return evalrawexp(context, mapping, args[2])
294 elif len(args) == 4:
294 elif len(args) == 4:
295 return evalrawexp(context, mapping, args[3])
295 return evalrawexp(context, mapping, args[3])
296
296
297 @templatefunc('ifeq(expr1, expr2, then[, else])')
297 @templatefunc('ifeq(expr1, expr2, then[, else])')
298 def ifeq(context, mapping, args):
298 def ifeq(context, mapping, args):
299 """Conditionally execute based on
299 """Conditionally execute based on
300 whether 2 items are equivalent."""
300 whether 2 items are equivalent."""
301 if not (3 <= len(args) <= 4):
301 if not (3 <= len(args) <= 4):
302 # i18n: "ifeq" is a keyword
302 # i18n: "ifeq" is a keyword
303 raise error.ParseError(_("ifeq expects three or four arguments"))
303 raise error.ParseError(_("ifeq expects three or four arguments"))
304
304
305 test = evalstring(context, mapping, args[0])
305 test = evalstring(context, mapping, args[0])
306 match = evalstring(context, mapping, args[1])
306 match = evalstring(context, mapping, args[1])
307 if test == match:
307 if test == match:
308 return evalrawexp(context, mapping, args[2])
308 return evalrawexp(context, mapping, args[2])
309 elif len(args) == 4:
309 elif len(args) == 4:
310 return evalrawexp(context, mapping, args[3])
310 return evalrawexp(context, mapping, args[3])
311
311
312 @templatefunc('join(list, sep)')
312 @templatefunc('join(list, sep)')
313 def join(context, mapping, args):
313 def join(context, mapping, args):
314 """Join items in a list with a delimiter."""
314 """Join items in a list with a delimiter."""
315 if not (1 <= len(args) <= 2):
315 if not (1 <= len(args) <= 2):
316 # i18n: "join" is a keyword
316 # i18n: "join" is a keyword
317 raise error.ParseError(_("join expects one or two arguments"))
317 raise error.ParseError(_("join expects one or two arguments"))
318
318
319 joinset = evalrawexp(context, mapping, args[0])
319 joinset = evalrawexp(context, mapping, args[0])
320 joiner = " "
320 joiner = " "
321 if len(args) > 1:
321 if len(args) > 1:
322 joiner = evalstring(context, mapping, args[1])
322 joiner = evalstring(context, mapping, args[1])
323 if isinstance(joinset, templateutil.wrapped):
323 if isinstance(joinset, templateutil.wrapped):
324 return joinset.join(context, mapping, joiner)
324 return joinset.join(context, mapping, joiner)
325 # TODO: perhaps a generator should be stringify()-ed here, but we can't
325 # TODO: perhaps a generator should be stringify()-ed here, but we can't
326 # because hgweb abuses it as a keyword that returns a list of dicts.
326 # because hgweb abuses it as a keyword that returns a list of dicts.
327 joinset = templateutil.unwrapvalue(context, mapping, joinset)
327 joinset = templateutil.unwrapvalue(context, mapping, joinset)
328 return templateutil.joinitems(pycompat.maybebytestr(joinset), joiner)
328 return templateutil.joinitems(pycompat.maybebytestr(joinset), joiner)
329
329
330 @templatefunc('label(label, expr)')
330 @templatefunc('label(label, expr)')
331 def label(context, mapping, args):
331 def label(context, mapping, args):
332 """Apply a label to generated content. Content with
332 """Apply a label to generated content. Content with
333 a label applied can result in additional post-processing, such as
333 a label applied can result in additional post-processing, such as
334 automatic colorization."""
334 automatic colorization."""
335 if len(args) != 2:
335 if len(args) != 2:
336 # i18n: "label" is a keyword
336 # i18n: "label" is a keyword
337 raise error.ParseError(_("label expects two arguments"))
337 raise error.ParseError(_("label expects two arguments"))
338
338
339 ui = context.resource(mapping, 'ui')
339 ui = context.resource(mapping, 'ui')
340 thing = evalstring(context, mapping, args[1])
340 thing = evalstring(context, mapping, args[1])
341 # preserve unknown symbol as literal so effects like 'red', 'bold',
341 # preserve unknown symbol as literal so effects like 'red', 'bold',
342 # etc. don't need to be quoted
342 # etc. don't need to be quoted
343 label = evalstringliteral(context, mapping, args[0])
343 label = evalstringliteral(context, mapping, args[0])
344
344
345 return ui.label(thing, label)
345 return ui.label(thing, label)
346
346
347 @templatefunc('latesttag([pattern])')
347 @templatefunc('latesttag([pattern])')
348 def latesttag(context, mapping, args):
348 def latesttag(context, mapping, args):
349 """The global tags matching the given pattern on the
349 """The global tags matching the given pattern on the
350 most recent globally tagged ancestor of this changeset.
350 most recent globally tagged ancestor of this changeset.
351 If no such tags exist, the "{tag}" template resolves to
351 If no such tags exist, the "{tag}" template resolves to
352 the string "null"."""
352 the string "null"."""
353 if len(args) > 1:
353 if len(args) > 1:
354 # i18n: "latesttag" is a keyword
354 # i18n: "latesttag" is a keyword
355 raise error.ParseError(_("latesttag expects at most one argument"))
355 raise error.ParseError(_("latesttag expects at most one argument"))
356
356
357 pattern = None
357 pattern = None
358 if len(args) == 1:
358 if len(args) == 1:
359 pattern = evalstring(context, mapping, args[0])
359 pattern = evalstring(context, mapping, args[0])
360 return templatekw.showlatesttags(context, mapping, pattern)
360 return templatekw.showlatesttags(context, mapping, pattern)
361
361
362 @templatefunc('localdate(date[, tz])')
362 @templatefunc('localdate(date[, tz])')
363 def localdate(context, mapping, args):
363 def localdate(context, mapping, args):
364 """Converts a date to the specified timezone.
364 """Converts a date to the specified timezone.
365 The default is local date."""
365 The default is local date."""
366 if not (1 <= len(args) <= 2):
366 if not (1 <= len(args) <= 2):
367 # i18n: "localdate" is a keyword
367 # i18n: "localdate" is a keyword
368 raise error.ParseError(_("localdate expects one or two arguments"))
368 raise error.ParseError(_("localdate expects one or two arguments"))
369
369
370 date = evaldate(context, mapping, args[0],
370 date = evaldate(context, mapping, args[0],
371 # i18n: "localdate" is a keyword
371 # i18n: "localdate" is a keyword
372 _("localdate expects a date information"))
372 _("localdate expects a date information"))
373 if len(args) >= 2:
373 if len(args) >= 2:
374 tzoffset = None
374 tzoffset = None
375 tz = evalfuncarg(context, mapping, args[1])
375 tz = evalfuncarg(context, mapping, args[1])
376 if isinstance(tz, bytes):
376 if isinstance(tz, bytes):
377 tzoffset, remainder = dateutil.parsetimezone(tz)
377 tzoffset, remainder = dateutil.parsetimezone(tz)
378 if remainder:
378 if remainder:
379 tzoffset = None
379 tzoffset = None
380 if tzoffset is None:
380 if tzoffset is None:
381 try:
381 try:
382 tzoffset = int(tz)
382 tzoffset = int(tz)
383 except (TypeError, ValueError):
383 except (TypeError, ValueError):
384 # i18n: "localdate" is a keyword
384 # i18n: "localdate" is a keyword
385 raise error.ParseError(_("localdate expects a timezone"))
385 raise error.ParseError(_("localdate expects a timezone"))
386 else:
386 else:
387 tzoffset = dateutil.makedate()[1]
387 tzoffset = dateutil.makedate()[1]
388 return (date[0], tzoffset)
388 return (date[0], tzoffset)
389
389
390 @templatefunc('max(iterable)')
390 @templatefunc('max(iterable)')
391 def max_(context, mapping, args, **kwargs):
391 def max_(context, mapping, args, **kwargs):
392 """Return the max of an iterable"""
392 """Return the max of an iterable"""
393 if len(args) != 1:
393 if len(args) != 1:
394 # i18n: "max" is a keyword
394 # i18n: "max" is a keyword
395 raise error.ParseError(_("max expects one argument"))
395 raise error.ParseError(_("max expects one argument"))
396
396
397 iterable = evalfuncarg(context, mapping, args[0])
397 iterable = evalfuncarg(context, mapping, args[0])
398 try:
398 try:
399 x = max(pycompat.maybebytestr(iterable))
399 x = max(pycompat.maybebytestr(iterable))
400 except (TypeError, ValueError):
400 except (TypeError, ValueError):
401 # i18n: "max" is a keyword
401 # i18n: "max" is a keyword
402 raise error.ParseError(_("max first argument should be an iterable"))
402 raise error.ParseError(_("max first argument should be an iterable"))
403 return templateutil.wraphybridvalue(iterable, x, x)
403 return templateutil.wraphybridvalue(iterable, x, x)
404
404
405 @templatefunc('min(iterable)')
405 @templatefunc('min(iterable)')
406 def min_(context, mapping, args, **kwargs):
406 def min_(context, mapping, args, **kwargs):
407 """Return the min of an iterable"""
407 """Return the min of an iterable"""
408 if len(args) != 1:
408 if len(args) != 1:
409 # i18n: "min" is a keyword
409 # i18n: "min" is a keyword
410 raise error.ParseError(_("min expects one argument"))
410 raise error.ParseError(_("min expects one argument"))
411
411
412 iterable = evalfuncarg(context, mapping, args[0])
412 iterable = evalfuncarg(context, mapping, args[0])
413 try:
413 try:
414 x = min(pycompat.maybebytestr(iterable))
414 x = min(pycompat.maybebytestr(iterable))
415 except (TypeError, ValueError):
415 except (TypeError, ValueError):
416 # i18n: "min" is a keyword
416 # i18n: "min" is a keyword
417 raise error.ParseError(_("min first argument should be an iterable"))
417 raise error.ParseError(_("min first argument should be an iterable"))
418 return templateutil.wraphybridvalue(iterable, x, x)
418 return templateutil.wraphybridvalue(iterable, x, x)
419
419
420 @templatefunc('mod(a, b)')
420 @templatefunc('mod(a, b)')
421 def mod(context, mapping, args):
421 def mod(context, mapping, args):
422 """Calculate a mod b such that a / b + a mod b == a"""
422 """Calculate a mod b such that a / b + a mod b == a"""
423 if not len(args) == 2:
423 if not len(args) == 2:
424 # i18n: "mod" is a keyword
424 # i18n: "mod" is a keyword
425 raise error.ParseError(_("mod expects two arguments"))
425 raise error.ParseError(_("mod expects two arguments"))
426
426
427 func = lambda a, b: a % b
427 func = lambda a, b: a % b
428 return templateutil.runarithmetic(context, mapping,
428 return templateutil.runarithmetic(context, mapping,
429 (func, args[0], args[1]))
429 (func, args[0], args[1]))
430
430
431 @templatefunc('obsfateoperations(markers)')
431 @templatefunc('obsfateoperations(markers)')
432 def obsfateoperations(context, mapping, args):
432 def obsfateoperations(context, mapping, args):
433 """Compute obsfate related information based on markers (EXPERIMENTAL)"""
433 """Compute obsfate related information based on markers (EXPERIMENTAL)"""
434 if len(args) != 1:
434 if len(args) != 1:
435 # i18n: "obsfateoperations" is a keyword
435 # i18n: "obsfateoperations" is a keyword
436 raise error.ParseError(_("obsfateoperations expects one argument"))
436 raise error.ParseError(_("obsfateoperations expects one argument"))
437
437
438 markers = evalfuncarg(context, mapping, args[0])
438 markers = evalfuncarg(context, mapping, args[0])
439
439
440 try:
440 try:
441 data = obsutil.markersoperations(markers)
441 data = obsutil.markersoperations(markers)
442 return templateutil.hybridlist(data, name='operation')
442 return templateutil.hybridlist(data, name='operation')
443 except (TypeError, KeyError):
443 except (TypeError, KeyError):
444 # i18n: "obsfateoperations" is a keyword
444 # i18n: "obsfateoperations" is a keyword
445 errmsg = _("obsfateoperations first argument should be an iterable")
445 errmsg = _("obsfateoperations first argument should be an iterable")
446 raise error.ParseError(errmsg)
446 raise error.ParseError(errmsg)
447
447
448 @templatefunc('obsfatedate(markers)')
448 @templatefunc('obsfatedate(markers)')
449 def obsfatedate(context, mapping, args):
449 def obsfatedate(context, mapping, args):
450 """Compute obsfate related information based on markers (EXPERIMENTAL)"""
450 """Compute obsfate related information based on markers (EXPERIMENTAL)"""
451 if len(args) != 1:
451 if len(args) != 1:
452 # i18n: "obsfatedate" is a keyword
452 # i18n: "obsfatedate" is a keyword
453 raise error.ParseError(_("obsfatedate expects one argument"))
453 raise error.ParseError(_("obsfatedate expects one argument"))
454
454
455 markers = evalfuncarg(context, mapping, args[0])
455 markers = evalfuncarg(context, mapping, args[0])
456
456
457 try:
457 try:
458 data = obsutil.markersdates(markers)
458 data = obsutil.markersdates(markers)
459 return templateutil.hybridlist(data, name='date', fmt='%d %d')
459 return templateutil.hybridlist(data, name='date', fmt='%d %d')
460 except (TypeError, KeyError):
460 except (TypeError, KeyError):
461 # i18n: "obsfatedate" is a keyword
461 # i18n: "obsfatedate" is a keyword
462 errmsg = _("obsfatedate first argument should be an iterable")
462 errmsg = _("obsfatedate first argument should be an iterable")
463 raise error.ParseError(errmsg)
463 raise error.ParseError(errmsg)
464
464
465 @templatefunc('obsfateusers(markers)')
465 @templatefunc('obsfateusers(markers)')
466 def obsfateusers(context, mapping, args):
466 def obsfateusers(context, mapping, args):
467 """Compute obsfate related information based on markers (EXPERIMENTAL)"""
467 """Compute obsfate related information based on markers (EXPERIMENTAL)"""
468 if len(args) != 1:
468 if len(args) != 1:
469 # i18n: "obsfateusers" is a keyword
469 # i18n: "obsfateusers" is a keyword
470 raise error.ParseError(_("obsfateusers expects one argument"))
470 raise error.ParseError(_("obsfateusers expects one argument"))
471
471
472 markers = evalfuncarg(context, mapping, args[0])
472 markers = evalfuncarg(context, mapping, args[0])
473
473
474 try:
474 try:
475 data = obsutil.markersusers(markers)
475 data = obsutil.markersusers(markers)
476 return templateutil.hybridlist(data, name='user')
476 return templateutil.hybridlist(data, name='user')
477 except (TypeError, KeyError, ValueError):
477 except (TypeError, KeyError, ValueError):
478 # i18n: "obsfateusers" is a keyword
478 # i18n: "obsfateusers" is a keyword
479 msg = _("obsfateusers first argument should be an iterable of "
479 msg = _("obsfateusers first argument should be an iterable of "
480 "obsmakers")
480 "obsmakers")
481 raise error.ParseError(msg)
481 raise error.ParseError(msg)
482
482
483 @templatefunc('obsfateverb(successors, markers)')
483 @templatefunc('obsfateverb(successors, markers)')
484 def obsfateverb(context, mapping, args):
484 def obsfateverb(context, mapping, args):
485 """Compute obsfate related information based on successors (EXPERIMENTAL)"""
485 """Compute obsfate related information based on successors (EXPERIMENTAL)"""
486 if len(args) != 2:
486 if len(args) != 2:
487 # i18n: "obsfateverb" is a keyword
487 # i18n: "obsfateverb" is a keyword
488 raise error.ParseError(_("obsfateverb expects two arguments"))
488 raise error.ParseError(_("obsfateverb expects two arguments"))
489
489
490 successors = evalfuncarg(context, mapping, args[0])
490 successors = evalfuncarg(context, mapping, args[0])
491 markers = evalfuncarg(context, mapping, args[1])
491 markers = evalfuncarg(context, mapping, args[1])
492
492
493 try:
493 try:
494 return obsutil.obsfateverb(successors, markers)
494 return obsutil.obsfateverb(successors, markers)
495 except TypeError:
495 except TypeError:
496 # i18n: "obsfateverb" is a keyword
496 # i18n: "obsfateverb" is a keyword
497 errmsg = _("obsfateverb first argument should be countable")
497 errmsg = _("obsfateverb first argument should be countable")
498 raise error.ParseError(errmsg)
498 raise error.ParseError(errmsg)
499
499
500 @templatefunc('relpath(path)')
500 @templatefunc('relpath(path)')
501 def relpath(context, mapping, args):
501 def relpath(context, mapping, args):
502 """Convert a repository-absolute path into a filesystem path relative to
502 """Convert a repository-absolute path into a filesystem path relative to
503 the current working directory."""
503 the current working directory."""
504 if len(args) != 1:
504 if len(args) != 1:
505 # i18n: "relpath" is a keyword
505 # i18n: "relpath" is a keyword
506 raise error.ParseError(_("relpath expects one argument"))
506 raise error.ParseError(_("relpath expects one argument"))
507
507
508 repo = context.resource(mapping, 'ctx').repo()
508 repo = context.resource(mapping, 'ctx').repo()
509 path = evalstring(context, mapping, args[0])
509 path = evalstring(context, mapping, args[0])
510 return repo.pathto(path)
510 return repo.pathto(path)
511
511
512 @templatefunc('revset(query[, formatargs...])')
512 @templatefunc('revset(query[, formatargs...])')
513 def revset(context, mapping, args):
513 def revset(context, mapping, args):
514 """Execute a revision set query. See
514 """Execute a revision set query. See
515 :hg:`help revset`."""
515 :hg:`help revset`."""
516 if not len(args) > 0:
516 if not len(args) > 0:
517 # i18n: "revset" is a keyword
517 # i18n: "revset" is a keyword
518 raise error.ParseError(_("revset expects one or more arguments"))
518 raise error.ParseError(_("revset expects one or more arguments"))
519
519
520 raw = evalstring(context, mapping, args[0])
520 raw = evalstring(context, mapping, args[0])
521 ctx = context.resource(mapping, 'ctx')
521 ctx = context.resource(mapping, 'ctx')
522 repo = ctx.repo()
522 repo = ctx.repo()
523
523
524 def query(expr):
524 def query(expr):
525 m = revsetmod.match(repo.ui, expr, repo=repo)
525 m = revsetmod.match(repo.ui, expr, lookup=revsetmod.lookupfn(repo))
526 return m(repo)
526 return m(repo)
527
527
528 if len(args) > 1:
528 if len(args) > 1:
529 formatargs = [evalfuncarg(context, mapping, a) for a in args[1:]]
529 formatargs = [evalfuncarg(context, mapping, a) for a in args[1:]]
530 revs = query(revsetlang.formatspec(raw, *formatargs))
530 revs = query(revsetlang.formatspec(raw, *formatargs))
531 revs = list(revs)
531 revs = list(revs)
532 else:
532 else:
533 cache = context.resource(mapping, 'cache')
533 cache = context.resource(mapping, 'cache')
534 revsetcache = cache.setdefault("revsetcache", {})
534 revsetcache = cache.setdefault("revsetcache", {})
535 if raw in revsetcache:
535 if raw in revsetcache:
536 revs = revsetcache[raw]
536 revs = revsetcache[raw]
537 else:
537 else:
538 revs = query(raw)
538 revs = query(raw)
539 revs = list(revs)
539 revs = list(revs)
540 revsetcache[raw] = revs
540 revsetcache[raw] = revs
541 return templatekw.showrevslist(context, mapping, "revision", revs)
541 return templatekw.showrevslist(context, mapping, "revision", revs)
542
542
543 @templatefunc('rstdoc(text, style)')
543 @templatefunc('rstdoc(text, style)')
544 def rstdoc(context, mapping, args):
544 def rstdoc(context, mapping, args):
545 """Format reStructuredText."""
545 """Format reStructuredText."""
546 if len(args) != 2:
546 if len(args) != 2:
547 # i18n: "rstdoc" is a keyword
547 # i18n: "rstdoc" is a keyword
548 raise error.ParseError(_("rstdoc expects two arguments"))
548 raise error.ParseError(_("rstdoc expects two arguments"))
549
549
550 text = evalstring(context, mapping, args[0])
550 text = evalstring(context, mapping, args[0])
551 style = evalstring(context, mapping, args[1])
551 style = evalstring(context, mapping, args[1])
552
552
553 return minirst.format(text, style=style, keep=['verbose'])
553 return minirst.format(text, style=style, keep=['verbose'])
554
554
555 @templatefunc('separate(sep, args)', argspec='sep *args')
555 @templatefunc('separate(sep, args)', argspec='sep *args')
556 def separate(context, mapping, args):
556 def separate(context, mapping, args):
557 """Add a separator between non-empty arguments."""
557 """Add a separator between non-empty arguments."""
558 if 'sep' not in args:
558 if 'sep' not in args:
559 # i18n: "separate" is a keyword
559 # i18n: "separate" is a keyword
560 raise error.ParseError(_("separate expects at least one argument"))
560 raise error.ParseError(_("separate expects at least one argument"))
561
561
562 sep = evalstring(context, mapping, args['sep'])
562 sep = evalstring(context, mapping, args['sep'])
563 first = True
563 first = True
564 for arg in args['args']:
564 for arg in args['args']:
565 argstr = evalstring(context, mapping, arg)
565 argstr = evalstring(context, mapping, arg)
566 if not argstr:
566 if not argstr:
567 continue
567 continue
568 if first:
568 if first:
569 first = False
569 first = False
570 else:
570 else:
571 yield sep
571 yield sep
572 yield argstr
572 yield argstr
573
573
574 @templatefunc('shortest(node, minlength=4)')
574 @templatefunc('shortest(node, minlength=4)')
575 def shortest(context, mapping, args):
575 def shortest(context, mapping, args):
576 """Obtain the shortest representation of
576 """Obtain the shortest representation of
577 a node."""
577 a node."""
578 if not (1 <= len(args) <= 2):
578 if not (1 <= len(args) <= 2):
579 # i18n: "shortest" is a keyword
579 # i18n: "shortest" is a keyword
580 raise error.ParseError(_("shortest() expects one or two arguments"))
580 raise error.ParseError(_("shortest() expects one or two arguments"))
581
581
582 node = evalstring(context, mapping, args[0])
582 node = evalstring(context, mapping, args[0])
583
583
584 minlength = 4
584 minlength = 4
585 if len(args) > 1:
585 if len(args) > 1:
586 minlength = evalinteger(context, mapping, args[1],
586 minlength = evalinteger(context, mapping, args[1],
587 # i18n: "shortest" is a keyword
587 # i18n: "shortest" is a keyword
588 _("shortest() expects an integer minlength"))
588 _("shortest() expects an integer minlength"))
589
589
590 # _partialmatch() of filtered changelog could take O(len(repo)) time,
590 # _partialmatch() of filtered changelog could take O(len(repo)) time,
591 # which would be unacceptably slow. so we look for hash collision in
591 # which would be unacceptably slow. so we look for hash collision in
592 # unfiltered space, which means some hashes may be slightly longer.
592 # unfiltered space, which means some hashes may be slightly longer.
593 cl = context.resource(mapping, 'ctx')._repo.unfiltered().changelog
593 cl = context.resource(mapping, 'ctx')._repo.unfiltered().changelog
594 return cl.shortest(node, minlength)
594 return cl.shortest(node, minlength)
595
595
596 @templatefunc('strip(text[, chars])')
596 @templatefunc('strip(text[, chars])')
597 def strip(context, mapping, args):
597 def strip(context, mapping, args):
598 """Strip characters from a string. By default,
598 """Strip characters from a string. By default,
599 strips all leading and trailing whitespace."""
599 strips all leading and trailing whitespace."""
600 if not (1 <= len(args) <= 2):
600 if not (1 <= len(args) <= 2):
601 # i18n: "strip" is a keyword
601 # i18n: "strip" is a keyword
602 raise error.ParseError(_("strip expects one or two arguments"))
602 raise error.ParseError(_("strip expects one or two arguments"))
603
603
604 text = evalstring(context, mapping, args[0])
604 text = evalstring(context, mapping, args[0])
605 if len(args) == 2:
605 if len(args) == 2:
606 chars = evalstring(context, mapping, args[1])
606 chars = evalstring(context, mapping, args[1])
607 return text.strip(chars)
607 return text.strip(chars)
608 return text.strip()
608 return text.strip()
609
609
610 @templatefunc('sub(pattern, replacement, expression)')
610 @templatefunc('sub(pattern, replacement, expression)')
611 def sub(context, mapping, args):
611 def sub(context, mapping, args):
612 """Perform text substitution
612 """Perform text substitution
613 using regular expressions."""
613 using regular expressions."""
614 if len(args) != 3:
614 if len(args) != 3:
615 # i18n: "sub" is a keyword
615 # i18n: "sub" is a keyword
616 raise error.ParseError(_("sub expects three arguments"))
616 raise error.ParseError(_("sub expects three arguments"))
617
617
618 pat = evalstring(context, mapping, args[0])
618 pat = evalstring(context, mapping, args[0])
619 rpl = evalstring(context, mapping, args[1])
619 rpl = evalstring(context, mapping, args[1])
620 src = evalstring(context, mapping, args[2])
620 src = evalstring(context, mapping, args[2])
621 try:
621 try:
622 patre = re.compile(pat)
622 patre = re.compile(pat)
623 except re.error:
623 except re.error:
624 # i18n: "sub" is a keyword
624 # i18n: "sub" is a keyword
625 raise error.ParseError(_("sub got an invalid pattern: %s") % pat)
625 raise error.ParseError(_("sub got an invalid pattern: %s") % pat)
626 try:
626 try:
627 yield patre.sub(rpl, src)
627 yield patre.sub(rpl, src)
628 except re.error:
628 except re.error:
629 # i18n: "sub" is a keyword
629 # i18n: "sub" is a keyword
630 raise error.ParseError(_("sub got an invalid replacement: %s") % rpl)
630 raise error.ParseError(_("sub got an invalid replacement: %s") % rpl)
631
631
632 @templatefunc('startswith(pattern, text)')
632 @templatefunc('startswith(pattern, text)')
633 def startswith(context, mapping, args):
633 def startswith(context, mapping, args):
634 """Returns the value from the "text" argument
634 """Returns the value from the "text" argument
635 if it begins with the content from the "pattern" argument."""
635 if it begins with the content from the "pattern" argument."""
636 if len(args) != 2:
636 if len(args) != 2:
637 # i18n: "startswith" is a keyword
637 # i18n: "startswith" is a keyword
638 raise error.ParseError(_("startswith expects two arguments"))
638 raise error.ParseError(_("startswith expects two arguments"))
639
639
640 patn = evalstring(context, mapping, args[0])
640 patn = evalstring(context, mapping, args[0])
641 text = evalstring(context, mapping, args[1])
641 text = evalstring(context, mapping, args[1])
642 if text.startswith(patn):
642 if text.startswith(patn):
643 return text
643 return text
644 return ''
644 return ''
645
645
646 @templatefunc('word(number, text[, separator])')
646 @templatefunc('word(number, text[, separator])')
647 def word(context, mapping, args):
647 def word(context, mapping, args):
648 """Return the nth word from a string."""
648 """Return the nth word from a string."""
649 if not (2 <= len(args) <= 3):
649 if not (2 <= len(args) <= 3):
650 # i18n: "word" is a keyword
650 # i18n: "word" is a keyword
651 raise error.ParseError(_("word expects two or three arguments, got %d")
651 raise error.ParseError(_("word expects two or three arguments, got %d")
652 % len(args))
652 % len(args))
653
653
654 num = evalinteger(context, mapping, args[0],
654 num = evalinteger(context, mapping, args[0],
655 # i18n: "word" is a keyword
655 # i18n: "word" is a keyword
656 _("word expects an integer index"))
656 _("word expects an integer index"))
657 text = evalstring(context, mapping, args[1])
657 text = evalstring(context, mapping, args[1])
658 if len(args) == 3:
658 if len(args) == 3:
659 splitter = evalstring(context, mapping, args[2])
659 splitter = evalstring(context, mapping, args[2])
660 else:
660 else:
661 splitter = None
661 splitter = None
662
662
663 tokens = text.split(splitter)
663 tokens = text.split(splitter)
664 if num >= len(tokens) or num < -len(tokens):
664 if num >= len(tokens) or num < -len(tokens):
665 return ''
665 return ''
666 else:
666 else:
667 return tokens[num]
667 return tokens[num]
668
668
669 def loadfunction(ui, extname, registrarobj):
669 def loadfunction(ui, extname, registrarobj):
670 """Load template function from specified registrarobj
670 """Load template function from specified registrarobj
671 """
671 """
672 for name, func in registrarobj._table.iteritems():
672 for name, func in registrarobj._table.iteritems():
673 funcs[name] = func
673 funcs[name] = func
674
674
675 # tell hggettext to extract docstrings from these functions:
675 # tell hggettext to extract docstrings from these functions:
676 i18nfunctions = funcs.values()
676 i18nfunctions = funcs.values()
@@ -1,2803 +1,2803 b''
1 $ HGENCODING=utf-8
1 $ HGENCODING=utf-8
2 $ export HGENCODING
2 $ export HGENCODING
3 $ cat > testrevset.py << EOF
3 $ cat > testrevset.py << EOF
4 > import mercurial.revset
4 > import mercurial.revset
5 >
5 >
6 > baseset = mercurial.revset.baseset
6 > baseset = mercurial.revset.baseset
7 >
7 >
8 > def r3232(repo, subset, x):
8 > def r3232(repo, subset, x):
9 > """"simple revset that return [3,2,3,2]
9 > """"simple revset that return [3,2,3,2]
10 >
10 >
11 > revisions duplicated on purpose.
11 > revisions duplicated on purpose.
12 > """
12 > """
13 > if 3 not in subset:
13 > if 3 not in subset:
14 > if 2 in subset:
14 > if 2 in subset:
15 > return baseset([2,2])
15 > return baseset([2,2])
16 > return baseset()
16 > return baseset()
17 > return baseset([3,3,2,2])
17 > return baseset([3,3,2,2])
18 >
18 >
19 > mercurial.revset.symbols[b'r3232'] = r3232
19 > mercurial.revset.symbols[b'r3232'] = r3232
20 > EOF
20 > EOF
21 $ cat >> $HGRCPATH << EOF
21 $ cat >> $HGRCPATH << EOF
22 > [extensions]
22 > [extensions]
23 > drawdag=$TESTDIR/drawdag.py
23 > drawdag=$TESTDIR/drawdag.py
24 > testrevset=$TESTTMP/testrevset.py
24 > testrevset=$TESTTMP/testrevset.py
25 > EOF
25 > EOF
26
26
27 $ try() {
27 $ try() {
28 > hg debugrevspec --debug "$@"
28 > hg debugrevspec --debug "$@"
29 > }
29 > }
30
30
31 $ log() {
31 $ log() {
32 > hg log --template '{rev}\n' -r "$1"
32 > hg log --template '{rev}\n' -r "$1"
33 > }
33 > }
34
34
35 extension to build '_intlist()' and '_hexlist()', which is necessary because
35 extension to build '_intlist()' and '_hexlist()', which is necessary because
36 these predicates use '\0' as a separator:
36 these predicates use '\0' as a separator:
37
37
38 $ cat <<EOF > debugrevlistspec.py
38 $ cat <<EOF > debugrevlistspec.py
39 > from __future__ import absolute_import
39 > from __future__ import absolute_import
40 > from mercurial import (
40 > from mercurial import (
41 > node as nodemod,
41 > node as nodemod,
42 > registrar,
42 > registrar,
43 > revset,
43 > revset,
44 > revsetlang,
44 > revsetlang,
45 > smartset,
45 > smartset,
46 > )
46 > )
47 > cmdtable = {}
47 > cmdtable = {}
48 > command = registrar.command(cmdtable)
48 > command = registrar.command(cmdtable)
49 > @command(b'debugrevlistspec',
49 > @command(b'debugrevlistspec',
50 > [(b'', b'optimize', None, b'print parsed tree after optimizing'),
50 > [(b'', b'optimize', None, b'print parsed tree after optimizing'),
51 > (b'', b'bin', None, b'unhexlify arguments')])
51 > (b'', b'bin', None, b'unhexlify arguments')])
52 > def debugrevlistspec(ui, repo, fmt, *args, **opts):
52 > def debugrevlistspec(ui, repo, fmt, *args, **opts):
53 > if opts['bin']:
53 > if opts['bin']:
54 > args = map(nodemod.bin, args)
54 > args = map(nodemod.bin, args)
55 > expr = revsetlang.formatspec(fmt, list(args))
55 > expr = revsetlang.formatspec(fmt, list(args))
56 > if ui.verbose:
56 > if ui.verbose:
57 > tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
57 > tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
58 > ui.note(revsetlang.prettyformat(tree), b"\n")
58 > ui.note(revsetlang.prettyformat(tree), b"\n")
59 > if opts["optimize"]:
59 > if opts["optimize"]:
60 > opttree = revsetlang.optimize(revsetlang.analyze(tree))
60 > opttree = revsetlang.optimize(revsetlang.analyze(tree))
61 > ui.note(b"* optimized:\n", revsetlang.prettyformat(opttree),
61 > ui.note(b"* optimized:\n", revsetlang.prettyformat(opttree),
62 > b"\n")
62 > b"\n")
63 > func = revset.match(ui, expr, repo)
63 > func = revset.match(ui, expr, lookup=revset.lookupfn(repo))
64 > revs = func(repo)
64 > revs = func(repo)
65 > if ui.verbose:
65 > if ui.verbose:
66 > ui.note(b"* set:\n", smartset.prettyformat(revs), b"\n")
66 > ui.note(b"* set:\n", smartset.prettyformat(revs), b"\n")
67 > for c in revs:
67 > for c in revs:
68 > ui.write(b"%d\n" % c)
68 > ui.write(b"%d\n" % c)
69 > EOF
69 > EOF
70 $ cat <<EOF >> $HGRCPATH
70 $ cat <<EOF >> $HGRCPATH
71 > [extensions]
71 > [extensions]
72 > debugrevlistspec = $TESTTMP/debugrevlistspec.py
72 > debugrevlistspec = $TESTTMP/debugrevlistspec.py
73 > EOF
73 > EOF
74 $ trylist() {
74 $ trylist() {
75 > hg debugrevlistspec --debug "$@"
75 > hg debugrevlistspec --debug "$@"
76 > }
76 > }
77
77
78 $ hg init repo
78 $ hg init repo
79 $ cd repo
79 $ cd repo
80
80
81 $ echo a > a
81 $ echo a > a
82 $ hg branch a
82 $ hg branch a
83 marked working directory as branch a
83 marked working directory as branch a
84 (branches are permanent and global, did you want a bookmark?)
84 (branches are permanent and global, did you want a bookmark?)
85 $ hg ci -Aqm0
85 $ hg ci -Aqm0
86
86
87 $ echo b > b
87 $ echo b > b
88 $ hg branch b
88 $ hg branch b
89 marked working directory as branch b
89 marked working directory as branch b
90 $ hg ci -Aqm1
90 $ hg ci -Aqm1
91
91
92 $ rm a
92 $ rm a
93 $ hg branch a-b-c-
93 $ hg branch a-b-c-
94 marked working directory as branch a-b-c-
94 marked working directory as branch a-b-c-
95 $ hg ci -Aqm2 -u Bob
95 $ hg ci -Aqm2 -u Bob
96
96
97 $ hg log -r "extra('branch', 'a-b-c-')" --template '{rev}\n'
97 $ hg log -r "extra('branch', 'a-b-c-')" --template '{rev}\n'
98 2
98 2
99 $ hg log -r "extra('branch')" --template '{rev}\n'
99 $ hg log -r "extra('branch')" --template '{rev}\n'
100 0
100 0
101 1
101 1
102 2
102 2
103 $ hg log -r "extra('branch', 're:a')" --template '{rev} {branch}\n'
103 $ hg log -r "extra('branch', 're:a')" --template '{rev} {branch}\n'
104 0 a
104 0 a
105 2 a-b-c-
105 2 a-b-c-
106
106
107 $ hg co 1
107 $ hg co 1
108 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
108 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
109 $ hg branch +a+b+c+
109 $ hg branch +a+b+c+
110 marked working directory as branch +a+b+c+
110 marked working directory as branch +a+b+c+
111 $ hg ci -Aqm3
111 $ hg ci -Aqm3
112
112
113 $ hg co 2 # interleave
113 $ hg co 2 # interleave
114 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
114 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
115 $ echo bb > b
115 $ echo bb > b
116 $ hg branch -- -a-b-c-
116 $ hg branch -- -a-b-c-
117 marked working directory as branch -a-b-c-
117 marked working directory as branch -a-b-c-
118 $ hg ci -Aqm4 -d "May 12 2005"
118 $ hg ci -Aqm4 -d "May 12 2005"
119
119
120 $ hg co 3
120 $ hg co 3
121 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
121 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
122 $ hg branch !a/b/c/
122 $ hg branch !a/b/c/
123 marked working directory as branch !a/b/c/
123 marked working directory as branch !a/b/c/
124 $ hg ci -Aqm"5 bug"
124 $ hg ci -Aqm"5 bug"
125
125
126 $ hg merge 4
126 $ hg merge 4
127 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
127 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
128 (branch merge, don't forget to commit)
128 (branch merge, don't forget to commit)
129 $ hg branch _a_b_c_
129 $ hg branch _a_b_c_
130 marked working directory as branch _a_b_c_
130 marked working directory as branch _a_b_c_
131 $ hg ci -Aqm"6 issue619"
131 $ hg ci -Aqm"6 issue619"
132
132
133 $ hg branch .a.b.c.
133 $ hg branch .a.b.c.
134 marked working directory as branch .a.b.c.
134 marked working directory as branch .a.b.c.
135 $ hg ci -Aqm7
135 $ hg ci -Aqm7
136
136
137 $ hg branch all
137 $ hg branch all
138 marked working directory as branch all
138 marked working directory as branch all
139
139
140 $ hg co 4
140 $ hg co 4
141 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
141 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
142 $ hg branch Γ©
142 $ hg branch Γ©
143 marked working directory as branch \xc3\xa9 (esc)
143 marked working directory as branch \xc3\xa9 (esc)
144 $ hg ci -Aqm9
144 $ hg ci -Aqm9
145
145
146 $ hg tag -r6 1.0
146 $ hg tag -r6 1.0
147 $ hg bookmark -r6 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
147 $ hg bookmark -r6 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
148
148
149 $ hg clone --quiet -U -r 7 . ../remote1
149 $ hg clone --quiet -U -r 7 . ../remote1
150 $ hg clone --quiet -U -r 8 . ../remote2
150 $ hg clone --quiet -U -r 8 . ../remote2
151 $ echo "[paths]" >> .hg/hgrc
151 $ echo "[paths]" >> .hg/hgrc
152 $ echo "default = ../remote1" >> .hg/hgrc
152 $ echo "default = ../remote1" >> .hg/hgrc
153
153
154 trivial
154 trivial
155
155
156 $ try 0:1
156 $ try 0:1
157 (range
157 (range
158 (symbol '0')
158 (symbol '0')
159 (symbol '1'))
159 (symbol '1'))
160 * set:
160 * set:
161 <spanset+ 0:2>
161 <spanset+ 0:2>
162 0
162 0
163 1
163 1
164 $ try --optimize :
164 $ try --optimize :
165 (rangeall
165 (rangeall
166 None)
166 None)
167 * optimized:
167 * optimized:
168 (rangeall
168 (rangeall
169 None)
169 None)
170 * set:
170 * set:
171 <spanset+ 0:10>
171 <spanset+ 0:10>
172 0
172 0
173 1
173 1
174 2
174 2
175 3
175 3
176 4
176 4
177 5
177 5
178 6
178 6
179 7
179 7
180 8
180 8
181 9
181 9
182 $ try 3::6
182 $ try 3::6
183 (dagrange
183 (dagrange
184 (symbol '3')
184 (symbol '3')
185 (symbol '6'))
185 (symbol '6'))
186 * set:
186 * set:
187 <baseset+ [3, 5, 6]>
187 <baseset+ [3, 5, 6]>
188 3
188 3
189 5
189 5
190 6
190 6
191 $ try '0|1|2'
191 $ try '0|1|2'
192 (or
192 (or
193 (list
193 (list
194 (symbol '0')
194 (symbol '0')
195 (symbol '1')
195 (symbol '1')
196 (symbol '2')))
196 (symbol '2')))
197 * set:
197 * set:
198 <baseset [0, 1, 2]>
198 <baseset [0, 1, 2]>
199 0
199 0
200 1
200 1
201 2
201 2
202
202
203 names that should work without quoting
203 names that should work without quoting
204
204
205 $ try a
205 $ try a
206 (symbol 'a')
206 (symbol 'a')
207 * set:
207 * set:
208 <baseset [0]>
208 <baseset [0]>
209 0
209 0
210 $ try b-a
210 $ try b-a
211 (minus
211 (minus
212 (symbol 'b')
212 (symbol 'b')
213 (symbol 'a'))
213 (symbol 'a'))
214 * set:
214 * set:
215 <filteredset
215 <filteredset
216 <baseset [1]>,
216 <baseset [1]>,
217 <not
217 <not
218 <baseset [0]>>>
218 <baseset [0]>>>
219 1
219 1
220 $ try _a_b_c_
220 $ try _a_b_c_
221 (symbol '_a_b_c_')
221 (symbol '_a_b_c_')
222 * set:
222 * set:
223 <baseset [6]>
223 <baseset [6]>
224 6
224 6
225 $ try _a_b_c_-a
225 $ try _a_b_c_-a
226 (minus
226 (minus
227 (symbol '_a_b_c_')
227 (symbol '_a_b_c_')
228 (symbol 'a'))
228 (symbol 'a'))
229 * set:
229 * set:
230 <filteredset
230 <filteredset
231 <baseset [6]>,
231 <baseset [6]>,
232 <not
232 <not
233 <baseset [0]>>>
233 <baseset [0]>>>
234 6
234 6
235 $ try .a.b.c.
235 $ try .a.b.c.
236 (symbol '.a.b.c.')
236 (symbol '.a.b.c.')
237 * set:
237 * set:
238 <baseset [7]>
238 <baseset [7]>
239 7
239 7
240 $ try .a.b.c.-a
240 $ try .a.b.c.-a
241 (minus
241 (minus
242 (symbol '.a.b.c.')
242 (symbol '.a.b.c.')
243 (symbol 'a'))
243 (symbol 'a'))
244 * set:
244 * set:
245 <filteredset
245 <filteredset
246 <baseset [7]>,
246 <baseset [7]>,
247 <not
247 <not
248 <baseset [0]>>>
248 <baseset [0]>>>
249 7
249 7
250
250
251 names that should be caught by fallback mechanism
251 names that should be caught by fallback mechanism
252
252
253 $ try -- '-a-b-c-'
253 $ try -- '-a-b-c-'
254 (symbol '-a-b-c-')
254 (symbol '-a-b-c-')
255 * set:
255 * set:
256 <baseset [4]>
256 <baseset [4]>
257 4
257 4
258 $ log -a-b-c-
258 $ log -a-b-c-
259 4
259 4
260 $ try '+a+b+c+'
260 $ try '+a+b+c+'
261 (symbol '+a+b+c+')
261 (symbol '+a+b+c+')
262 * set:
262 * set:
263 <baseset [3]>
263 <baseset [3]>
264 3
264 3
265 $ try '+a+b+c+:'
265 $ try '+a+b+c+:'
266 (rangepost
266 (rangepost
267 (symbol '+a+b+c+'))
267 (symbol '+a+b+c+'))
268 * set:
268 * set:
269 <spanset+ 3:10>
269 <spanset+ 3:10>
270 3
270 3
271 4
271 4
272 5
272 5
273 6
273 6
274 7
274 7
275 8
275 8
276 9
276 9
277 $ try ':+a+b+c+'
277 $ try ':+a+b+c+'
278 (rangepre
278 (rangepre
279 (symbol '+a+b+c+'))
279 (symbol '+a+b+c+'))
280 * set:
280 * set:
281 <spanset+ 0:4>
281 <spanset+ 0:4>
282 0
282 0
283 1
283 1
284 2
284 2
285 3
285 3
286 $ try -- '-a-b-c-:+a+b+c+'
286 $ try -- '-a-b-c-:+a+b+c+'
287 (range
287 (range
288 (symbol '-a-b-c-')
288 (symbol '-a-b-c-')
289 (symbol '+a+b+c+'))
289 (symbol '+a+b+c+'))
290 * set:
290 * set:
291 <spanset- 3:5>
291 <spanset- 3:5>
292 4
292 4
293 3
293 3
294 $ log '-a-b-c-:+a+b+c+'
294 $ log '-a-b-c-:+a+b+c+'
295 4
295 4
296 3
296 3
297
297
298 $ try -- -a-b-c--a # complains
298 $ try -- -a-b-c--a # complains
299 (minus
299 (minus
300 (minus
300 (minus
301 (minus
301 (minus
302 (negate
302 (negate
303 (symbol 'a'))
303 (symbol 'a'))
304 (symbol 'b'))
304 (symbol 'b'))
305 (symbol 'c'))
305 (symbol 'c'))
306 (negate
306 (negate
307 (symbol 'a')))
307 (symbol 'a')))
308 abort: unknown revision '-a'!
308 abort: unknown revision '-a'!
309 [255]
309 [255]
310 $ try Γ©
310 $ try Γ©
311 (symbol '\xc3\xa9')
311 (symbol '\xc3\xa9')
312 * set:
312 * set:
313 <baseset [9]>
313 <baseset [9]>
314 9
314 9
315
315
316 no quoting needed
316 no quoting needed
317
317
318 $ log ::a-b-c-
318 $ log ::a-b-c-
319 0
319 0
320 1
320 1
321 2
321 2
322
322
323 quoting needed
323 quoting needed
324
324
325 $ try '"-a-b-c-"-a'
325 $ try '"-a-b-c-"-a'
326 (minus
326 (minus
327 (string '-a-b-c-')
327 (string '-a-b-c-')
328 (symbol 'a'))
328 (symbol 'a'))
329 * set:
329 * set:
330 <filteredset
330 <filteredset
331 <baseset [4]>,
331 <baseset [4]>,
332 <not
332 <not
333 <baseset [0]>>>
333 <baseset [0]>>>
334 4
334 4
335
335
336 $ log '1 or 2'
336 $ log '1 or 2'
337 1
337 1
338 2
338 2
339 $ log '1|2'
339 $ log '1|2'
340 1
340 1
341 2
341 2
342 $ log '1 and 2'
342 $ log '1 and 2'
343 $ log '1&2'
343 $ log '1&2'
344 $ try '1&2|3' # precedence - and is higher
344 $ try '1&2|3' # precedence - and is higher
345 (or
345 (or
346 (list
346 (list
347 (and
347 (and
348 (symbol '1')
348 (symbol '1')
349 (symbol '2'))
349 (symbol '2'))
350 (symbol '3')))
350 (symbol '3')))
351 * set:
351 * set:
352 <addset
352 <addset
353 <baseset []>,
353 <baseset []>,
354 <baseset [3]>>
354 <baseset [3]>>
355 3
355 3
356 $ try '1|2&3'
356 $ try '1|2&3'
357 (or
357 (or
358 (list
358 (list
359 (symbol '1')
359 (symbol '1')
360 (and
360 (and
361 (symbol '2')
361 (symbol '2')
362 (symbol '3'))))
362 (symbol '3'))))
363 * set:
363 * set:
364 <addset
364 <addset
365 <baseset [1]>,
365 <baseset [1]>,
366 <baseset []>>
366 <baseset []>>
367 1
367 1
368 $ try '1&2&3' # associativity
368 $ try '1&2&3' # associativity
369 (and
369 (and
370 (and
370 (and
371 (symbol '1')
371 (symbol '1')
372 (symbol '2'))
372 (symbol '2'))
373 (symbol '3'))
373 (symbol '3'))
374 * set:
374 * set:
375 <baseset []>
375 <baseset []>
376 $ try '1|(2|3)'
376 $ try '1|(2|3)'
377 (or
377 (or
378 (list
378 (list
379 (symbol '1')
379 (symbol '1')
380 (group
380 (group
381 (or
381 (or
382 (list
382 (list
383 (symbol '2')
383 (symbol '2')
384 (symbol '3'))))))
384 (symbol '3'))))))
385 * set:
385 * set:
386 <addset
386 <addset
387 <baseset [1]>,
387 <baseset [1]>,
388 <baseset [2, 3]>>
388 <baseset [2, 3]>>
389 1
389 1
390 2
390 2
391 3
391 3
392 $ log '1.0' # tag
392 $ log '1.0' # tag
393 6
393 6
394 $ log 'a' # branch
394 $ log 'a' # branch
395 0
395 0
396 $ log '2785f51ee'
396 $ log '2785f51ee'
397 0
397 0
398 $ log 'date(2005)'
398 $ log 'date(2005)'
399 4
399 4
400 $ log 'date(this is a test)'
400 $ log 'date(this is a test)'
401 hg: parse error at 10: unexpected token: symbol
401 hg: parse error at 10: unexpected token: symbol
402 (date(this is a test)
402 (date(this is a test)
403 ^ here)
403 ^ here)
404 [255]
404 [255]
405 $ log 'date()'
405 $ log 'date()'
406 hg: parse error: date requires a string
406 hg: parse error: date requires a string
407 [255]
407 [255]
408 $ log 'date'
408 $ log 'date'
409 abort: unknown revision 'date'!
409 abort: unknown revision 'date'!
410 [255]
410 [255]
411 $ log 'date('
411 $ log 'date('
412 hg: parse error at 5: not a prefix: end
412 hg: parse error at 5: not a prefix: end
413 (date(
413 (date(
414 ^ here)
414 ^ here)
415 [255]
415 [255]
416 $ log 'date("\xy")'
416 $ log 'date("\xy")'
417 hg: parse error: invalid \x escape* (glob)
417 hg: parse error: invalid \x escape* (glob)
418 [255]
418 [255]
419 $ log 'date(tip)'
419 $ log 'date(tip)'
420 hg: parse error: invalid date: 'tip'
420 hg: parse error: invalid date: 'tip'
421 [255]
421 [255]
422 $ log '0:date'
422 $ log '0:date'
423 abort: unknown revision 'date'!
423 abort: unknown revision 'date'!
424 [255]
424 [255]
425 $ log '::"date"'
425 $ log '::"date"'
426 abort: unknown revision 'date'!
426 abort: unknown revision 'date'!
427 [255]
427 [255]
428 $ hg book date -r 4
428 $ hg book date -r 4
429 $ log '0:date'
429 $ log '0:date'
430 0
430 0
431 1
431 1
432 2
432 2
433 3
433 3
434 4
434 4
435 $ log '::date'
435 $ log '::date'
436 0
436 0
437 1
437 1
438 2
438 2
439 4
439 4
440 $ log '::"date"'
440 $ log '::"date"'
441 0
441 0
442 1
442 1
443 2
443 2
444 4
444 4
445 $ log 'date(2005) and 1::'
445 $ log 'date(2005) and 1::'
446 4
446 4
447 $ hg book -d date
447 $ hg book -d date
448
448
449 function name should be a symbol
449 function name should be a symbol
450
450
451 $ log '"date"(2005)'
451 $ log '"date"(2005)'
452 hg: parse error: not a symbol
452 hg: parse error: not a symbol
453 [255]
453 [255]
454
454
455 keyword arguments
455 keyword arguments
456
456
457 $ log 'extra(branch, value=a)'
457 $ log 'extra(branch, value=a)'
458 0
458 0
459
459
460 $ log 'extra(branch, a, b)'
460 $ log 'extra(branch, a, b)'
461 hg: parse error: extra takes at most 2 positional arguments
461 hg: parse error: extra takes at most 2 positional arguments
462 [255]
462 [255]
463 $ log 'extra(a, label=b)'
463 $ log 'extra(a, label=b)'
464 hg: parse error: extra got multiple values for keyword argument 'label'
464 hg: parse error: extra got multiple values for keyword argument 'label'
465 [255]
465 [255]
466 $ log 'extra(label=branch, default)'
466 $ log 'extra(label=branch, default)'
467 hg: parse error: extra got an invalid argument
467 hg: parse error: extra got an invalid argument
468 [255]
468 [255]
469 $ log 'extra(branch, foo+bar=baz)'
469 $ log 'extra(branch, foo+bar=baz)'
470 hg: parse error: extra got an invalid argument
470 hg: parse error: extra got an invalid argument
471 [255]
471 [255]
472 $ log 'extra(unknown=branch)'
472 $ log 'extra(unknown=branch)'
473 hg: parse error: extra got an unexpected keyword argument 'unknown'
473 hg: parse error: extra got an unexpected keyword argument 'unknown'
474 [255]
474 [255]
475
475
476 $ try 'foo=bar|baz'
476 $ try 'foo=bar|baz'
477 (keyvalue
477 (keyvalue
478 (symbol 'foo')
478 (symbol 'foo')
479 (or
479 (or
480 (list
480 (list
481 (symbol 'bar')
481 (symbol 'bar')
482 (symbol 'baz'))))
482 (symbol 'baz'))))
483 hg: parse error: can't use a key-value pair in this context
483 hg: parse error: can't use a key-value pair in this context
484 [255]
484 [255]
485
485
486 right-hand side should be optimized recursively
486 right-hand side should be optimized recursively
487
487
488 $ try --optimize 'foo=(not public())'
488 $ try --optimize 'foo=(not public())'
489 (keyvalue
489 (keyvalue
490 (symbol 'foo')
490 (symbol 'foo')
491 (group
491 (group
492 (not
492 (not
493 (func
493 (func
494 (symbol 'public')
494 (symbol 'public')
495 None))))
495 None))))
496 * optimized:
496 * optimized:
497 (keyvalue
497 (keyvalue
498 (symbol 'foo')
498 (symbol 'foo')
499 (func
499 (func
500 (symbol '_notpublic')
500 (symbol '_notpublic')
501 None))
501 None))
502 hg: parse error: can't use a key-value pair in this context
502 hg: parse error: can't use a key-value pair in this context
503 [255]
503 [255]
504
504
505 relation-subscript operator has the highest binding strength (as function call):
505 relation-subscript operator has the highest binding strength (as function call):
506
506
507 $ hg debugrevspec -p parsed 'tip:tip^#generations[-1]'
507 $ hg debugrevspec -p parsed 'tip:tip^#generations[-1]'
508 * parsed:
508 * parsed:
509 (range
509 (range
510 (symbol 'tip')
510 (symbol 'tip')
511 (relsubscript
511 (relsubscript
512 (parentpost
512 (parentpost
513 (symbol 'tip'))
513 (symbol 'tip'))
514 (symbol 'generations')
514 (symbol 'generations')
515 (negate
515 (negate
516 (symbol '1'))))
516 (symbol '1'))))
517 9
517 9
518 8
518 8
519 7
519 7
520 6
520 6
521 5
521 5
522 4
522 4
523
523
524 $ hg debugrevspec -p parsed --no-show-revs 'not public()#generations[0]'
524 $ hg debugrevspec -p parsed --no-show-revs 'not public()#generations[0]'
525 * parsed:
525 * parsed:
526 (not
526 (not
527 (relsubscript
527 (relsubscript
528 (func
528 (func
529 (symbol 'public')
529 (symbol 'public')
530 None)
530 None)
531 (symbol 'generations')
531 (symbol 'generations')
532 (symbol '0')))
532 (symbol '0')))
533
533
534 left-hand side of relation-subscript operator should be optimized recursively:
534 left-hand side of relation-subscript operator should be optimized recursively:
535
535
536 $ hg debugrevspec -p analyzed -p optimized --no-show-revs \
536 $ hg debugrevspec -p analyzed -p optimized --no-show-revs \
537 > '(not public())#generations[0]'
537 > '(not public())#generations[0]'
538 * analyzed:
538 * analyzed:
539 (relsubscript
539 (relsubscript
540 (not
540 (not
541 (func
541 (func
542 (symbol 'public')
542 (symbol 'public')
543 None))
543 None))
544 (symbol 'generations')
544 (symbol 'generations')
545 (symbol '0'))
545 (symbol '0'))
546 * optimized:
546 * optimized:
547 (relsubscript
547 (relsubscript
548 (func
548 (func
549 (symbol '_notpublic')
549 (symbol '_notpublic')
550 None)
550 None)
551 (symbol 'generations')
551 (symbol 'generations')
552 (symbol '0'))
552 (symbol '0'))
553
553
554 resolution of subscript and relation-subscript ternary operators:
554 resolution of subscript and relation-subscript ternary operators:
555
555
556 $ hg debugrevspec -p analyzed 'tip[0]'
556 $ hg debugrevspec -p analyzed 'tip[0]'
557 * analyzed:
557 * analyzed:
558 (subscript
558 (subscript
559 (symbol 'tip')
559 (symbol 'tip')
560 (symbol '0'))
560 (symbol '0'))
561 hg: parse error: can't use a subscript in this context
561 hg: parse error: can't use a subscript in this context
562 [255]
562 [255]
563
563
564 $ hg debugrevspec -p analyzed 'tip#rel[0]'
564 $ hg debugrevspec -p analyzed 'tip#rel[0]'
565 * analyzed:
565 * analyzed:
566 (relsubscript
566 (relsubscript
567 (symbol 'tip')
567 (symbol 'tip')
568 (symbol 'rel')
568 (symbol 'rel')
569 (symbol '0'))
569 (symbol '0'))
570 hg: parse error: unknown identifier: rel
570 hg: parse error: unknown identifier: rel
571 [255]
571 [255]
572
572
573 $ hg debugrevspec -p analyzed '(tip#rel)[0]'
573 $ hg debugrevspec -p analyzed '(tip#rel)[0]'
574 * analyzed:
574 * analyzed:
575 (subscript
575 (subscript
576 (relation
576 (relation
577 (symbol 'tip')
577 (symbol 'tip')
578 (symbol 'rel'))
578 (symbol 'rel'))
579 (symbol '0'))
579 (symbol '0'))
580 hg: parse error: can't use a subscript in this context
580 hg: parse error: can't use a subscript in this context
581 [255]
581 [255]
582
582
583 $ hg debugrevspec -p analyzed 'tip#rel[0][1]'
583 $ hg debugrevspec -p analyzed 'tip#rel[0][1]'
584 * analyzed:
584 * analyzed:
585 (subscript
585 (subscript
586 (relsubscript
586 (relsubscript
587 (symbol 'tip')
587 (symbol 'tip')
588 (symbol 'rel')
588 (symbol 'rel')
589 (symbol '0'))
589 (symbol '0'))
590 (symbol '1'))
590 (symbol '1'))
591 hg: parse error: can't use a subscript in this context
591 hg: parse error: can't use a subscript in this context
592 [255]
592 [255]
593
593
594 $ hg debugrevspec -p analyzed 'tip#rel0#rel1[1]'
594 $ hg debugrevspec -p analyzed 'tip#rel0#rel1[1]'
595 * analyzed:
595 * analyzed:
596 (relsubscript
596 (relsubscript
597 (relation
597 (relation
598 (symbol 'tip')
598 (symbol 'tip')
599 (symbol 'rel0'))
599 (symbol 'rel0'))
600 (symbol 'rel1')
600 (symbol 'rel1')
601 (symbol '1'))
601 (symbol '1'))
602 hg: parse error: unknown identifier: rel1
602 hg: parse error: unknown identifier: rel1
603 [255]
603 [255]
604
604
605 $ hg debugrevspec -p analyzed 'tip#rel0[0]#rel1[1]'
605 $ hg debugrevspec -p analyzed 'tip#rel0[0]#rel1[1]'
606 * analyzed:
606 * analyzed:
607 (relsubscript
607 (relsubscript
608 (relsubscript
608 (relsubscript
609 (symbol 'tip')
609 (symbol 'tip')
610 (symbol 'rel0')
610 (symbol 'rel0')
611 (symbol '0'))
611 (symbol '0'))
612 (symbol 'rel1')
612 (symbol 'rel1')
613 (symbol '1'))
613 (symbol '1'))
614 hg: parse error: unknown identifier: rel1
614 hg: parse error: unknown identifier: rel1
615 [255]
615 [255]
616
616
617 parse errors of relation, subscript and relation-subscript operators:
617 parse errors of relation, subscript and relation-subscript operators:
618
618
619 $ hg debugrevspec '[0]'
619 $ hg debugrevspec '[0]'
620 hg: parse error at 0: not a prefix: [
620 hg: parse error at 0: not a prefix: [
621 ([0]
621 ([0]
622 ^ here)
622 ^ here)
623 [255]
623 [255]
624 $ hg debugrevspec '.#'
624 $ hg debugrevspec '.#'
625 hg: parse error at 2: not a prefix: end
625 hg: parse error at 2: not a prefix: end
626 (.#
626 (.#
627 ^ here)
627 ^ here)
628 [255]
628 [255]
629 $ hg debugrevspec '#rel'
629 $ hg debugrevspec '#rel'
630 hg: parse error at 0: not a prefix: #
630 hg: parse error at 0: not a prefix: #
631 (#rel
631 (#rel
632 ^ here)
632 ^ here)
633 [255]
633 [255]
634 $ hg debugrevspec '.#rel[0'
634 $ hg debugrevspec '.#rel[0'
635 hg: parse error at 7: unexpected token: end
635 hg: parse error at 7: unexpected token: end
636 (.#rel[0
636 (.#rel[0
637 ^ here)
637 ^ here)
638 [255]
638 [255]
639 $ hg debugrevspec '.]'
639 $ hg debugrevspec '.]'
640 hg: parse error at 1: invalid token
640 hg: parse error at 1: invalid token
641 (.]
641 (.]
642 ^ here)
642 ^ here)
643 [255]
643 [255]
644
644
645 $ hg debugrevspec '.#generations[a]'
645 $ hg debugrevspec '.#generations[a]'
646 hg: parse error: relation subscript must be an integer
646 hg: parse error: relation subscript must be an integer
647 [255]
647 [255]
648 $ hg debugrevspec '.#generations[1-2]'
648 $ hg debugrevspec '.#generations[1-2]'
649 hg: parse error: relation subscript must be an integer
649 hg: parse error: relation subscript must be an integer
650 [255]
650 [255]
651
651
652 parsed tree at stages:
652 parsed tree at stages:
653
653
654 $ hg debugrevspec -p all '()'
654 $ hg debugrevspec -p all '()'
655 * parsed:
655 * parsed:
656 (group
656 (group
657 None)
657 None)
658 * expanded:
658 * expanded:
659 (group
659 (group
660 None)
660 None)
661 * concatenated:
661 * concatenated:
662 (group
662 (group
663 None)
663 None)
664 * analyzed:
664 * analyzed:
665 None
665 None
666 * optimized:
666 * optimized:
667 None
667 None
668 hg: parse error: missing argument
668 hg: parse error: missing argument
669 [255]
669 [255]
670
670
671 $ hg debugrevspec --no-optimized -p all '()'
671 $ hg debugrevspec --no-optimized -p all '()'
672 * parsed:
672 * parsed:
673 (group
673 (group
674 None)
674 None)
675 * expanded:
675 * expanded:
676 (group
676 (group
677 None)
677 None)
678 * concatenated:
678 * concatenated:
679 (group
679 (group
680 None)
680 None)
681 * analyzed:
681 * analyzed:
682 None
682 None
683 hg: parse error: missing argument
683 hg: parse error: missing argument
684 [255]
684 [255]
685
685
686 $ hg debugrevspec -p parsed -p analyzed -p optimized '(0|1)-1'
686 $ hg debugrevspec -p parsed -p analyzed -p optimized '(0|1)-1'
687 * parsed:
687 * parsed:
688 (minus
688 (minus
689 (group
689 (group
690 (or
690 (or
691 (list
691 (list
692 (symbol '0')
692 (symbol '0')
693 (symbol '1'))))
693 (symbol '1'))))
694 (symbol '1'))
694 (symbol '1'))
695 * analyzed:
695 * analyzed:
696 (and
696 (and
697 (or
697 (or
698 (list
698 (list
699 (symbol '0')
699 (symbol '0')
700 (symbol '1')))
700 (symbol '1')))
701 (not
701 (not
702 (symbol '1')))
702 (symbol '1')))
703 * optimized:
703 * optimized:
704 (difference
704 (difference
705 (func
705 (func
706 (symbol '_list')
706 (symbol '_list')
707 (string '0\x001'))
707 (string '0\x001'))
708 (symbol '1'))
708 (symbol '1'))
709 0
709 0
710
710
711 $ hg debugrevspec -p unknown '0'
711 $ hg debugrevspec -p unknown '0'
712 abort: invalid stage name: unknown
712 abort: invalid stage name: unknown
713 [255]
713 [255]
714
714
715 $ hg debugrevspec -p all --optimize '0'
715 $ hg debugrevspec -p all --optimize '0'
716 abort: cannot use --optimize with --show-stage
716 abort: cannot use --optimize with --show-stage
717 [255]
717 [255]
718
718
719 verify optimized tree:
719 verify optimized tree:
720
720
721 $ hg debugrevspec --verify '0|1'
721 $ hg debugrevspec --verify '0|1'
722
722
723 $ hg debugrevspec --verify -v -p analyzed -p optimized 'r3232() & 2'
723 $ hg debugrevspec --verify -v -p analyzed -p optimized 'r3232() & 2'
724 * analyzed:
724 * analyzed:
725 (and
725 (and
726 (func
726 (func
727 (symbol 'r3232')
727 (symbol 'r3232')
728 None)
728 None)
729 (symbol '2'))
729 (symbol '2'))
730 * optimized:
730 * optimized:
731 (andsmally
731 (andsmally
732 (func
732 (func
733 (symbol 'r3232')
733 (symbol 'r3232')
734 None)
734 None)
735 (symbol '2'))
735 (symbol '2'))
736 * analyzed set:
736 * analyzed set:
737 <baseset [2]>
737 <baseset [2]>
738 * optimized set:
738 * optimized set:
739 <baseset [2, 2]>
739 <baseset [2, 2]>
740 --- analyzed
740 --- analyzed
741 +++ optimized
741 +++ optimized
742 2
742 2
743 +2
743 +2
744 [1]
744 [1]
745
745
746 $ hg debugrevspec --no-optimized --verify-optimized '0'
746 $ hg debugrevspec --no-optimized --verify-optimized '0'
747 abort: cannot use --verify-optimized with --no-optimized
747 abort: cannot use --verify-optimized with --no-optimized
748 [255]
748 [255]
749
749
750 Test that symbols only get parsed as functions if there's an opening
750 Test that symbols only get parsed as functions if there's an opening
751 parenthesis.
751 parenthesis.
752
752
753 $ hg book only -r 9
753 $ hg book only -r 9
754 $ log 'only(only)' # Outer "only" is a function, inner "only" is the bookmark
754 $ log 'only(only)' # Outer "only" is a function, inner "only" is the bookmark
755 8
755 8
756 9
756 9
757
757
758 ':y' behaves like '0:y', but can't be rewritten as such since the revision '0'
758 ':y' behaves like '0:y', but can't be rewritten as such since the revision '0'
759 may be hidden (issue5385)
759 may be hidden (issue5385)
760
760
761 $ try -p parsed -p analyzed ':'
761 $ try -p parsed -p analyzed ':'
762 * parsed:
762 * parsed:
763 (rangeall
763 (rangeall
764 None)
764 None)
765 * analyzed:
765 * analyzed:
766 (rangeall
766 (rangeall
767 None)
767 None)
768 * set:
768 * set:
769 <spanset+ 0:10>
769 <spanset+ 0:10>
770 0
770 0
771 1
771 1
772 2
772 2
773 3
773 3
774 4
774 4
775 5
775 5
776 6
776 6
777 7
777 7
778 8
778 8
779 9
779 9
780 $ try -p analyzed ':1'
780 $ try -p analyzed ':1'
781 * analyzed:
781 * analyzed:
782 (rangepre
782 (rangepre
783 (symbol '1'))
783 (symbol '1'))
784 * set:
784 * set:
785 <spanset+ 0:2>
785 <spanset+ 0:2>
786 0
786 0
787 1
787 1
788 $ try -p analyzed ':(1|2)'
788 $ try -p analyzed ':(1|2)'
789 * analyzed:
789 * analyzed:
790 (rangepre
790 (rangepre
791 (or
791 (or
792 (list
792 (list
793 (symbol '1')
793 (symbol '1')
794 (symbol '2'))))
794 (symbol '2'))))
795 * set:
795 * set:
796 <spanset+ 0:3>
796 <spanset+ 0:3>
797 0
797 0
798 1
798 1
799 2
799 2
800 $ try -p analyzed ':(1&2)'
800 $ try -p analyzed ':(1&2)'
801 * analyzed:
801 * analyzed:
802 (rangepre
802 (rangepre
803 (and
803 (and
804 (symbol '1')
804 (symbol '1')
805 (symbol '2')))
805 (symbol '2')))
806 * set:
806 * set:
807 <baseset []>
807 <baseset []>
808
808
809 infix/suffix resolution of ^ operator (issue2884, issue5764):
809 infix/suffix resolution of ^ operator (issue2884, issue5764):
810
810
811 x^:y means (x^):y
811 x^:y means (x^):y
812
812
813 $ try '1^:2'
813 $ try '1^:2'
814 (range
814 (range
815 (parentpost
815 (parentpost
816 (symbol '1'))
816 (symbol '1'))
817 (symbol '2'))
817 (symbol '2'))
818 * set:
818 * set:
819 <spanset+ 0:3>
819 <spanset+ 0:3>
820 0
820 0
821 1
821 1
822 2
822 2
823
823
824 $ try '1^::2'
824 $ try '1^::2'
825 (dagrange
825 (dagrange
826 (parentpost
826 (parentpost
827 (symbol '1'))
827 (symbol '1'))
828 (symbol '2'))
828 (symbol '2'))
829 * set:
829 * set:
830 <baseset+ [0, 1, 2]>
830 <baseset+ [0, 1, 2]>
831 0
831 0
832 1
832 1
833 2
833 2
834
834
835 $ try '1^..2'
835 $ try '1^..2'
836 (dagrange
836 (dagrange
837 (parentpost
837 (parentpost
838 (symbol '1'))
838 (symbol '1'))
839 (symbol '2'))
839 (symbol '2'))
840 * set:
840 * set:
841 <baseset+ [0, 1, 2]>
841 <baseset+ [0, 1, 2]>
842 0
842 0
843 1
843 1
844 2
844 2
845
845
846 $ try '9^:'
846 $ try '9^:'
847 (rangepost
847 (rangepost
848 (parentpost
848 (parentpost
849 (symbol '9')))
849 (symbol '9')))
850 * set:
850 * set:
851 <spanset+ 8:10>
851 <spanset+ 8:10>
852 8
852 8
853 9
853 9
854
854
855 $ try '9^::'
855 $ try '9^::'
856 (dagrangepost
856 (dagrangepost
857 (parentpost
857 (parentpost
858 (symbol '9')))
858 (symbol '9')))
859 * set:
859 * set:
860 <generatorsetasc+>
860 <generatorsetasc+>
861 8
861 8
862 9
862 9
863
863
864 $ try '9^..'
864 $ try '9^..'
865 (dagrangepost
865 (dagrangepost
866 (parentpost
866 (parentpost
867 (symbol '9')))
867 (symbol '9')))
868 * set:
868 * set:
869 <generatorsetasc+>
869 <generatorsetasc+>
870 8
870 8
871 9
871 9
872
872
873 x^:y should be resolved before omitting group operators
873 x^:y should be resolved before omitting group operators
874
874
875 $ try '1^(:2)'
875 $ try '1^(:2)'
876 (parent
876 (parent
877 (symbol '1')
877 (symbol '1')
878 (group
878 (group
879 (rangepre
879 (rangepre
880 (symbol '2'))))
880 (symbol '2'))))
881 hg: parse error: ^ expects a number 0, 1, or 2
881 hg: parse error: ^ expects a number 0, 1, or 2
882 [255]
882 [255]
883
883
884 x^:y should be resolved recursively
884 x^:y should be resolved recursively
885
885
886 $ try 'sort(1^:2)'
886 $ try 'sort(1^:2)'
887 (func
887 (func
888 (symbol 'sort')
888 (symbol 'sort')
889 (range
889 (range
890 (parentpost
890 (parentpost
891 (symbol '1'))
891 (symbol '1'))
892 (symbol '2')))
892 (symbol '2')))
893 * set:
893 * set:
894 <spanset+ 0:3>
894 <spanset+ 0:3>
895 0
895 0
896 1
896 1
897 2
897 2
898
898
899 $ try '(3^:4)^:2'
899 $ try '(3^:4)^:2'
900 (range
900 (range
901 (parentpost
901 (parentpost
902 (group
902 (group
903 (range
903 (range
904 (parentpost
904 (parentpost
905 (symbol '3'))
905 (symbol '3'))
906 (symbol '4'))))
906 (symbol '4'))))
907 (symbol '2'))
907 (symbol '2'))
908 * set:
908 * set:
909 <spanset+ 0:3>
909 <spanset+ 0:3>
910 0
910 0
911 1
911 1
912 2
912 2
913
913
914 $ try '(3^::4)^::2'
914 $ try '(3^::4)^::2'
915 (dagrange
915 (dagrange
916 (parentpost
916 (parentpost
917 (group
917 (group
918 (dagrange
918 (dagrange
919 (parentpost
919 (parentpost
920 (symbol '3'))
920 (symbol '3'))
921 (symbol '4'))))
921 (symbol '4'))))
922 (symbol '2'))
922 (symbol '2'))
923 * set:
923 * set:
924 <baseset+ [0, 1, 2]>
924 <baseset+ [0, 1, 2]>
925 0
925 0
926 1
926 1
927 2
927 2
928
928
929 $ try '(9^:)^:'
929 $ try '(9^:)^:'
930 (rangepost
930 (rangepost
931 (parentpost
931 (parentpost
932 (group
932 (group
933 (rangepost
933 (rangepost
934 (parentpost
934 (parentpost
935 (symbol '9'))))))
935 (symbol '9'))))))
936 * set:
936 * set:
937 <spanset+ 4:10>
937 <spanset+ 4:10>
938 4
938 4
939 5
939 5
940 6
940 6
941 7
941 7
942 8
942 8
943 9
943 9
944
944
945 x^ in alias should also be resolved
945 x^ in alias should also be resolved
946
946
947 $ try 'A' --config 'revsetalias.A=1^:2'
947 $ try 'A' --config 'revsetalias.A=1^:2'
948 (symbol 'A')
948 (symbol 'A')
949 * expanded:
949 * expanded:
950 (range
950 (range
951 (parentpost
951 (parentpost
952 (symbol '1'))
952 (symbol '1'))
953 (symbol '2'))
953 (symbol '2'))
954 * set:
954 * set:
955 <spanset+ 0:3>
955 <spanset+ 0:3>
956 0
956 0
957 1
957 1
958 2
958 2
959
959
960 $ try 'A:2' --config 'revsetalias.A=1^'
960 $ try 'A:2' --config 'revsetalias.A=1^'
961 (range
961 (range
962 (symbol 'A')
962 (symbol 'A')
963 (symbol '2'))
963 (symbol '2'))
964 * expanded:
964 * expanded:
965 (range
965 (range
966 (parentpost
966 (parentpost
967 (symbol '1'))
967 (symbol '1'))
968 (symbol '2'))
968 (symbol '2'))
969 * set:
969 * set:
970 <spanset+ 0:3>
970 <spanset+ 0:3>
971 0
971 0
972 1
972 1
973 2
973 2
974
974
975 but not beyond the boundary of alias expansion, because the resolution should
975 but not beyond the boundary of alias expansion, because the resolution should
976 be made at the parsing stage
976 be made at the parsing stage
977
977
978 $ try '1^A' --config 'revsetalias.A=:2'
978 $ try '1^A' --config 'revsetalias.A=:2'
979 (parent
979 (parent
980 (symbol '1')
980 (symbol '1')
981 (symbol 'A'))
981 (symbol 'A'))
982 * expanded:
982 * expanded:
983 (parent
983 (parent
984 (symbol '1')
984 (symbol '1')
985 (rangepre
985 (rangepre
986 (symbol '2')))
986 (symbol '2')))
987 hg: parse error: ^ expects a number 0, 1, or 2
987 hg: parse error: ^ expects a number 0, 1, or 2
988 [255]
988 [255]
989
989
990 '::' itself isn't a valid expression
990 '::' itself isn't a valid expression
991
991
992 $ try '::'
992 $ try '::'
993 (dagrangeall
993 (dagrangeall
994 None)
994 None)
995 hg: parse error: can't use '::' in this context
995 hg: parse error: can't use '::' in this context
996 [255]
996 [255]
997
997
998 ancestor can accept 0 or more arguments
998 ancestor can accept 0 or more arguments
999
999
1000 $ log 'ancestor()'
1000 $ log 'ancestor()'
1001 $ log 'ancestor(1)'
1001 $ log 'ancestor(1)'
1002 1
1002 1
1003 $ log 'ancestor(4,5)'
1003 $ log 'ancestor(4,5)'
1004 1
1004 1
1005 $ log 'ancestor(4,5) and 4'
1005 $ log 'ancestor(4,5) and 4'
1006 $ log 'ancestor(0,0,1,3)'
1006 $ log 'ancestor(0,0,1,3)'
1007 0
1007 0
1008 $ log 'ancestor(3,1,5,3,5,1)'
1008 $ log 'ancestor(3,1,5,3,5,1)'
1009 1
1009 1
1010 $ log 'ancestor(0,1,3,5)'
1010 $ log 'ancestor(0,1,3,5)'
1011 0
1011 0
1012 $ log 'ancestor(1,2,3,4,5)'
1012 $ log 'ancestor(1,2,3,4,5)'
1013 1
1013 1
1014
1014
1015 test ancestors
1015 test ancestors
1016
1016
1017 $ hg log -G -T '{rev}\n' --config experimental.graphshorten=True
1017 $ hg log -G -T '{rev}\n' --config experimental.graphshorten=True
1018 @ 9
1018 @ 9
1019 o 8
1019 o 8
1020 | o 7
1020 | o 7
1021 | o 6
1021 | o 6
1022 |/|
1022 |/|
1023 | o 5
1023 | o 5
1024 o | 4
1024 o | 4
1025 | o 3
1025 | o 3
1026 o | 2
1026 o | 2
1027 |/
1027 |/
1028 o 1
1028 o 1
1029 o 0
1029 o 0
1030
1030
1031 $ log 'ancestors(5)'
1031 $ log 'ancestors(5)'
1032 0
1032 0
1033 1
1033 1
1034 3
1034 3
1035 5
1035 5
1036 $ log 'ancestor(ancestors(5))'
1036 $ log 'ancestor(ancestors(5))'
1037 0
1037 0
1038 $ log '::r3232()'
1038 $ log '::r3232()'
1039 0
1039 0
1040 1
1040 1
1041 2
1041 2
1042 3
1042 3
1043
1043
1044 test ancestors with depth limit
1044 test ancestors with depth limit
1045
1045
1046 (depth=0 selects the node itself)
1046 (depth=0 selects the node itself)
1047
1047
1048 $ log 'reverse(ancestors(9, depth=0))'
1048 $ log 'reverse(ancestors(9, depth=0))'
1049 9
1049 9
1050
1050
1051 (interleaved: '4' would be missing if heap queue were higher depth first)
1051 (interleaved: '4' would be missing if heap queue were higher depth first)
1052
1052
1053 $ log 'reverse(ancestors(8:9, depth=1))'
1053 $ log 'reverse(ancestors(8:9, depth=1))'
1054 9
1054 9
1055 8
1055 8
1056 4
1056 4
1057
1057
1058 (interleaved: '2' would be missing if heap queue were higher depth first)
1058 (interleaved: '2' would be missing if heap queue were higher depth first)
1059
1059
1060 $ log 'reverse(ancestors(7+8, depth=2))'
1060 $ log 'reverse(ancestors(7+8, depth=2))'
1061 8
1061 8
1062 7
1062 7
1063 6
1063 6
1064 5
1064 5
1065 4
1065 4
1066 2
1066 2
1067
1067
1068 (walk example above by separate queries)
1068 (walk example above by separate queries)
1069
1069
1070 $ log 'reverse(ancestors(8, depth=2)) + reverse(ancestors(7, depth=2))'
1070 $ log 'reverse(ancestors(8, depth=2)) + reverse(ancestors(7, depth=2))'
1071 8
1071 8
1072 4
1072 4
1073 2
1073 2
1074 7
1074 7
1075 6
1075 6
1076 5
1076 5
1077
1077
1078 (walk 2nd and 3rd ancestors)
1078 (walk 2nd and 3rd ancestors)
1079
1079
1080 $ log 'reverse(ancestors(7, depth=3, startdepth=2))'
1080 $ log 'reverse(ancestors(7, depth=3, startdepth=2))'
1081 5
1081 5
1082 4
1082 4
1083 3
1083 3
1084 2
1084 2
1085
1085
1086 (interleaved: '4' would be missing if higher-depth ancestors weren't scanned)
1086 (interleaved: '4' would be missing if higher-depth ancestors weren't scanned)
1087
1087
1088 $ log 'reverse(ancestors(7+8, depth=2, startdepth=2))'
1088 $ log 'reverse(ancestors(7+8, depth=2, startdepth=2))'
1089 5
1089 5
1090 4
1090 4
1091 2
1091 2
1092
1092
1093 (note that 'ancestors(x, depth=y, startdepth=z)' does not identical to
1093 (note that 'ancestors(x, depth=y, startdepth=z)' does not identical to
1094 'ancestors(x, depth=y) - ancestors(x, depth=z-1)' because a node may have
1094 'ancestors(x, depth=y) - ancestors(x, depth=z-1)' because a node may have
1095 multiple depths)
1095 multiple depths)
1096
1096
1097 $ log 'reverse(ancestors(7+8, depth=2) - ancestors(7+8, depth=1))'
1097 $ log 'reverse(ancestors(7+8, depth=2) - ancestors(7+8, depth=1))'
1098 5
1098 5
1099 2
1099 2
1100
1100
1101 test bad arguments passed to ancestors()
1101 test bad arguments passed to ancestors()
1102
1102
1103 $ log 'ancestors(., depth=-1)'
1103 $ log 'ancestors(., depth=-1)'
1104 hg: parse error: negative depth
1104 hg: parse error: negative depth
1105 [255]
1105 [255]
1106 $ log 'ancestors(., depth=foo)'
1106 $ log 'ancestors(., depth=foo)'
1107 hg: parse error: ancestors expects an integer depth
1107 hg: parse error: ancestors expects an integer depth
1108 [255]
1108 [255]
1109
1109
1110 test descendants
1110 test descendants
1111
1111
1112 $ hg log -G -T '{rev}\n' --config experimental.graphshorten=True
1112 $ hg log -G -T '{rev}\n' --config experimental.graphshorten=True
1113 @ 9
1113 @ 9
1114 o 8
1114 o 8
1115 | o 7
1115 | o 7
1116 | o 6
1116 | o 6
1117 |/|
1117 |/|
1118 | o 5
1118 | o 5
1119 o | 4
1119 o | 4
1120 | o 3
1120 | o 3
1121 o | 2
1121 o | 2
1122 |/
1122 |/
1123 o 1
1123 o 1
1124 o 0
1124 o 0
1125
1125
1126 (null is ultimate root and has optimized path)
1126 (null is ultimate root and has optimized path)
1127
1127
1128 $ log 'null:4 & descendants(null)'
1128 $ log 'null:4 & descendants(null)'
1129 -1
1129 -1
1130 0
1130 0
1131 1
1131 1
1132 2
1132 2
1133 3
1133 3
1134 4
1134 4
1135
1135
1136 (including merge)
1136 (including merge)
1137
1137
1138 $ log ':8 & descendants(2)'
1138 $ log ':8 & descendants(2)'
1139 2
1139 2
1140 4
1140 4
1141 6
1141 6
1142 7
1142 7
1143 8
1143 8
1144
1144
1145 (multiple roots)
1145 (multiple roots)
1146
1146
1147 $ log ':8 & descendants(2+5)'
1147 $ log ':8 & descendants(2+5)'
1148 2
1148 2
1149 4
1149 4
1150 5
1150 5
1151 6
1151 6
1152 7
1152 7
1153 8
1153 8
1154
1154
1155 test descendants with depth limit
1155 test descendants with depth limit
1156
1156
1157 (depth=0 selects the node itself)
1157 (depth=0 selects the node itself)
1158
1158
1159 $ log 'descendants(0, depth=0)'
1159 $ log 'descendants(0, depth=0)'
1160 0
1160 0
1161 $ log 'null: & descendants(null, depth=0)'
1161 $ log 'null: & descendants(null, depth=0)'
1162 -1
1162 -1
1163
1163
1164 (p2 = null should be ignored)
1164 (p2 = null should be ignored)
1165
1165
1166 $ log 'null: & descendants(null, depth=2)'
1166 $ log 'null: & descendants(null, depth=2)'
1167 -1
1167 -1
1168 0
1168 0
1169 1
1169 1
1170
1170
1171 (multiple paths: depth(6) = (2, 3))
1171 (multiple paths: depth(6) = (2, 3))
1172
1172
1173 $ log 'descendants(1+3, depth=2)'
1173 $ log 'descendants(1+3, depth=2)'
1174 1
1174 1
1175 2
1175 2
1176 3
1176 3
1177 4
1177 4
1178 5
1178 5
1179 6
1179 6
1180
1180
1181 (multiple paths: depth(5) = (1, 2), depth(6) = (2, 3))
1181 (multiple paths: depth(5) = (1, 2), depth(6) = (2, 3))
1182
1182
1183 $ log 'descendants(3+1, depth=2, startdepth=2)'
1183 $ log 'descendants(3+1, depth=2, startdepth=2)'
1184 4
1184 4
1185 5
1185 5
1186 6
1186 6
1187
1187
1188 (multiple depths: depth(6) = (0, 2, 4), search for depth=2)
1188 (multiple depths: depth(6) = (0, 2, 4), search for depth=2)
1189
1189
1190 $ log 'descendants(0+3+6, depth=3, startdepth=1)'
1190 $ log 'descendants(0+3+6, depth=3, startdepth=1)'
1191 1
1191 1
1192 2
1192 2
1193 3
1193 3
1194 4
1194 4
1195 5
1195 5
1196 6
1196 6
1197 7
1197 7
1198
1198
1199 (multiple depths: depth(6) = (0, 4), no match)
1199 (multiple depths: depth(6) = (0, 4), no match)
1200
1200
1201 $ log 'descendants(0+6, depth=3, startdepth=1)'
1201 $ log 'descendants(0+6, depth=3, startdepth=1)'
1202 1
1202 1
1203 2
1203 2
1204 3
1204 3
1205 4
1205 4
1206 5
1206 5
1207 7
1207 7
1208
1208
1209 test ancestors/descendants relation subscript:
1209 test ancestors/descendants relation subscript:
1210
1210
1211 $ log 'tip#generations[0]'
1211 $ log 'tip#generations[0]'
1212 9
1212 9
1213 $ log '.#generations[-1]'
1213 $ log '.#generations[-1]'
1214 8
1214 8
1215 $ log '.#g[(-1)]'
1215 $ log '.#g[(-1)]'
1216 8
1216 8
1217
1217
1218 $ hg debugrevspec -p parsed 'roots(:)#g[2]'
1218 $ hg debugrevspec -p parsed 'roots(:)#g[2]'
1219 * parsed:
1219 * parsed:
1220 (relsubscript
1220 (relsubscript
1221 (func
1221 (func
1222 (symbol 'roots')
1222 (symbol 'roots')
1223 (rangeall
1223 (rangeall
1224 None))
1224 None))
1225 (symbol 'g')
1225 (symbol 'g')
1226 (symbol '2'))
1226 (symbol '2'))
1227 2
1227 2
1228 3
1228 3
1229
1229
1230 test author
1230 test author
1231
1231
1232 $ log 'author(bob)'
1232 $ log 'author(bob)'
1233 2
1233 2
1234 $ log 'author("re:bob|test")'
1234 $ log 'author("re:bob|test")'
1235 0
1235 0
1236 1
1236 1
1237 2
1237 2
1238 3
1238 3
1239 4
1239 4
1240 5
1240 5
1241 6
1241 6
1242 7
1242 7
1243 8
1243 8
1244 9
1244 9
1245 $ log 'author(r"re:\S")'
1245 $ log 'author(r"re:\S")'
1246 0
1246 0
1247 1
1247 1
1248 2
1248 2
1249 3
1249 3
1250 4
1250 4
1251 5
1251 5
1252 6
1252 6
1253 7
1253 7
1254 8
1254 8
1255 9
1255 9
1256 $ log 'branch(Γ©)'
1256 $ log 'branch(Γ©)'
1257 8
1257 8
1258 9
1258 9
1259 $ log 'branch(a)'
1259 $ log 'branch(a)'
1260 0
1260 0
1261 $ hg log -r 'branch("re:a")' --template '{rev} {branch}\n'
1261 $ hg log -r 'branch("re:a")' --template '{rev} {branch}\n'
1262 0 a
1262 0 a
1263 2 a-b-c-
1263 2 a-b-c-
1264 3 +a+b+c+
1264 3 +a+b+c+
1265 4 -a-b-c-
1265 4 -a-b-c-
1266 5 !a/b/c/
1266 5 !a/b/c/
1267 6 _a_b_c_
1267 6 _a_b_c_
1268 7 .a.b.c.
1268 7 .a.b.c.
1269 $ log 'children(ancestor(4,5))'
1269 $ log 'children(ancestor(4,5))'
1270 2
1270 2
1271 3
1271 3
1272
1272
1273 $ log 'children(4)'
1273 $ log 'children(4)'
1274 6
1274 6
1275 8
1275 8
1276 $ log 'children(null)'
1276 $ log 'children(null)'
1277 0
1277 0
1278
1278
1279 $ log 'closed()'
1279 $ log 'closed()'
1280 $ log 'contains(a)'
1280 $ log 'contains(a)'
1281 0
1281 0
1282 1
1282 1
1283 3
1283 3
1284 5
1284 5
1285 $ log 'contains("../repo/a")'
1285 $ log 'contains("../repo/a")'
1286 0
1286 0
1287 1
1287 1
1288 3
1288 3
1289 5
1289 5
1290 $ log 'desc(B)'
1290 $ log 'desc(B)'
1291 5
1291 5
1292 $ hg log -r 'desc(r"re:S?u")' --template "{rev} {desc|firstline}\n"
1292 $ hg log -r 'desc(r"re:S?u")' --template "{rev} {desc|firstline}\n"
1293 5 5 bug
1293 5 5 bug
1294 6 6 issue619
1294 6 6 issue619
1295 $ log 'descendants(2 or 3)'
1295 $ log 'descendants(2 or 3)'
1296 2
1296 2
1297 3
1297 3
1298 4
1298 4
1299 5
1299 5
1300 6
1300 6
1301 7
1301 7
1302 8
1302 8
1303 9
1303 9
1304 $ log 'file("b*")'
1304 $ log 'file("b*")'
1305 1
1305 1
1306 4
1306 4
1307 $ log 'filelog("b")'
1307 $ log 'filelog("b")'
1308 1
1308 1
1309 4
1309 4
1310 $ log 'filelog("../repo/b")'
1310 $ log 'filelog("../repo/b")'
1311 1
1311 1
1312 4
1312 4
1313 $ log 'follow()'
1313 $ log 'follow()'
1314 0
1314 0
1315 1
1315 1
1316 2
1316 2
1317 4
1317 4
1318 8
1318 8
1319 9
1319 9
1320 $ log 'grep("issue\d+")'
1320 $ log 'grep("issue\d+")'
1321 6
1321 6
1322 $ try 'grep("(")' # invalid regular expression
1322 $ try 'grep("(")' # invalid regular expression
1323 (func
1323 (func
1324 (symbol 'grep')
1324 (symbol 'grep')
1325 (string '('))
1325 (string '('))
1326 hg: parse error: invalid match pattern: (unbalanced parenthesis|missing \),.*) (re)
1326 hg: parse error: invalid match pattern: (unbalanced parenthesis|missing \),.*) (re)
1327 [255]
1327 [255]
1328 $ try 'grep("\bissue\d+")'
1328 $ try 'grep("\bissue\d+")'
1329 (func
1329 (func
1330 (symbol 'grep')
1330 (symbol 'grep')
1331 (string '\x08issue\\d+'))
1331 (string '\x08issue\\d+'))
1332 * set:
1332 * set:
1333 <filteredset
1333 <filteredset
1334 <fullreposet+ 0:10>,
1334 <fullreposet+ 0:10>,
1335 <grep '\x08issue\\d+'>>
1335 <grep '\x08issue\\d+'>>
1336 $ try 'grep(r"\bissue\d+")'
1336 $ try 'grep(r"\bissue\d+")'
1337 (func
1337 (func
1338 (symbol 'grep')
1338 (symbol 'grep')
1339 (string '\\bissue\\d+'))
1339 (string '\\bissue\\d+'))
1340 * set:
1340 * set:
1341 <filteredset
1341 <filteredset
1342 <fullreposet+ 0:10>,
1342 <fullreposet+ 0:10>,
1343 <grep '\\bissue\\d+'>>
1343 <grep '\\bissue\\d+'>>
1344 6
1344 6
1345 $ try 'grep(r"\")'
1345 $ try 'grep(r"\")'
1346 hg: parse error at 7: unterminated string
1346 hg: parse error at 7: unterminated string
1347 (grep(r"\")
1347 (grep(r"\")
1348 ^ here)
1348 ^ here)
1349 [255]
1349 [255]
1350 $ log 'head()'
1350 $ log 'head()'
1351 0
1351 0
1352 1
1352 1
1353 2
1353 2
1354 3
1354 3
1355 4
1355 4
1356 5
1356 5
1357 6
1357 6
1358 7
1358 7
1359 9
1359 9
1360 $ log 'heads(6::)'
1360 $ log 'heads(6::)'
1361 7
1361 7
1362 $ log 'keyword(issue)'
1362 $ log 'keyword(issue)'
1363 6
1363 6
1364 $ log 'keyword("test a")'
1364 $ log 'keyword("test a")'
1365
1365
1366 Test first (=limit) and last
1366 Test first (=limit) and last
1367
1367
1368 $ log 'limit(head(), 1)'
1368 $ log 'limit(head(), 1)'
1369 0
1369 0
1370 $ log 'limit(author("re:bob|test"), 3, 5)'
1370 $ log 'limit(author("re:bob|test"), 3, 5)'
1371 5
1371 5
1372 6
1372 6
1373 7
1373 7
1374 $ log 'limit(author("re:bob|test"), offset=6)'
1374 $ log 'limit(author("re:bob|test"), offset=6)'
1375 6
1375 6
1376 $ log 'limit(author("re:bob|test"), offset=10)'
1376 $ log 'limit(author("re:bob|test"), offset=10)'
1377 $ log 'limit(all(), 1, -1)'
1377 $ log 'limit(all(), 1, -1)'
1378 hg: parse error: negative offset
1378 hg: parse error: negative offset
1379 [255]
1379 [255]
1380 $ log 'limit(all(), -1)'
1380 $ log 'limit(all(), -1)'
1381 hg: parse error: negative number to select
1381 hg: parse error: negative number to select
1382 [255]
1382 [255]
1383 $ log 'limit(all(), 0)'
1383 $ log 'limit(all(), 0)'
1384
1384
1385 $ log 'last(all(), -1)'
1385 $ log 'last(all(), -1)'
1386 hg: parse error: negative number to select
1386 hg: parse error: negative number to select
1387 [255]
1387 [255]
1388 $ log 'last(all(), 0)'
1388 $ log 'last(all(), 0)'
1389 $ log 'last(all(), 1)'
1389 $ log 'last(all(), 1)'
1390 9
1390 9
1391 $ log 'last(all(), 2)'
1391 $ log 'last(all(), 2)'
1392 8
1392 8
1393 9
1393 9
1394
1394
1395 Test smartset.slice() by first/last()
1395 Test smartset.slice() by first/last()
1396
1396
1397 (using unoptimized set, filteredset as example)
1397 (using unoptimized set, filteredset as example)
1398
1398
1399 $ hg debugrevspec --no-show-revs -s '0:7 & branch("re:")'
1399 $ hg debugrevspec --no-show-revs -s '0:7 & branch("re:")'
1400 * set:
1400 * set:
1401 <filteredset
1401 <filteredset
1402 <spanset+ 0:8>,
1402 <spanset+ 0:8>,
1403 <branch 're:'>>
1403 <branch 're:'>>
1404 $ log 'limit(0:7 & branch("re:"), 3, 4)'
1404 $ log 'limit(0:7 & branch("re:"), 3, 4)'
1405 4
1405 4
1406 5
1406 5
1407 6
1407 6
1408 $ log 'limit(7:0 & branch("re:"), 3, 4)'
1408 $ log 'limit(7:0 & branch("re:"), 3, 4)'
1409 3
1409 3
1410 2
1410 2
1411 1
1411 1
1412 $ log 'last(0:7 & branch("re:"), 2)'
1412 $ log 'last(0:7 & branch("re:"), 2)'
1413 6
1413 6
1414 7
1414 7
1415
1415
1416 (using baseset)
1416 (using baseset)
1417
1417
1418 $ hg debugrevspec --no-show-revs -s 0+1+2+3+4+5+6+7
1418 $ hg debugrevspec --no-show-revs -s 0+1+2+3+4+5+6+7
1419 * set:
1419 * set:
1420 <baseset [0, 1, 2, 3, 4, 5, 6, 7]>
1420 <baseset [0, 1, 2, 3, 4, 5, 6, 7]>
1421 $ hg debugrevspec --no-show-revs -s 0::7
1421 $ hg debugrevspec --no-show-revs -s 0::7
1422 * set:
1422 * set:
1423 <baseset+ [0, 1, 2, 3, 4, 5, 6, 7]>
1423 <baseset+ [0, 1, 2, 3, 4, 5, 6, 7]>
1424 $ log 'limit(0+1+2+3+4+5+6+7, 3, 4)'
1424 $ log 'limit(0+1+2+3+4+5+6+7, 3, 4)'
1425 4
1425 4
1426 5
1426 5
1427 6
1427 6
1428 $ log 'limit(sort(0::7, rev), 3, 4)'
1428 $ log 'limit(sort(0::7, rev), 3, 4)'
1429 4
1429 4
1430 5
1430 5
1431 6
1431 6
1432 $ log 'limit(sort(0::7, -rev), 3, 4)'
1432 $ log 'limit(sort(0::7, -rev), 3, 4)'
1433 3
1433 3
1434 2
1434 2
1435 1
1435 1
1436 $ log 'last(sort(0::7, rev), 2)'
1436 $ log 'last(sort(0::7, rev), 2)'
1437 6
1437 6
1438 7
1438 7
1439 $ hg debugrevspec -s 'limit(sort(0::7, rev), 3, 6)'
1439 $ hg debugrevspec -s 'limit(sort(0::7, rev), 3, 6)'
1440 * set:
1440 * set:
1441 <baseset+ [6, 7]>
1441 <baseset+ [6, 7]>
1442 6
1442 6
1443 7
1443 7
1444 $ hg debugrevspec -s 'limit(sort(0::7, rev), 3, 9)'
1444 $ hg debugrevspec -s 'limit(sort(0::7, rev), 3, 9)'
1445 * set:
1445 * set:
1446 <baseset+ []>
1446 <baseset+ []>
1447 $ hg debugrevspec -s 'limit(sort(0::7, -rev), 3, 6)'
1447 $ hg debugrevspec -s 'limit(sort(0::7, -rev), 3, 6)'
1448 * set:
1448 * set:
1449 <baseset- [0, 1]>
1449 <baseset- [0, 1]>
1450 1
1450 1
1451 0
1451 0
1452 $ hg debugrevspec -s 'limit(sort(0::7, -rev), 3, 9)'
1452 $ hg debugrevspec -s 'limit(sort(0::7, -rev), 3, 9)'
1453 * set:
1453 * set:
1454 <baseset- []>
1454 <baseset- []>
1455 $ hg debugrevspec -s 'limit(0::7, 0)'
1455 $ hg debugrevspec -s 'limit(0::7, 0)'
1456 * set:
1456 * set:
1457 <baseset+ []>
1457 <baseset+ []>
1458
1458
1459 (using spanset)
1459 (using spanset)
1460
1460
1461 $ hg debugrevspec --no-show-revs -s 0:7
1461 $ hg debugrevspec --no-show-revs -s 0:7
1462 * set:
1462 * set:
1463 <spanset+ 0:8>
1463 <spanset+ 0:8>
1464 $ log 'limit(0:7, 3, 4)'
1464 $ log 'limit(0:7, 3, 4)'
1465 4
1465 4
1466 5
1466 5
1467 6
1467 6
1468 $ log 'limit(7:0, 3, 4)'
1468 $ log 'limit(7:0, 3, 4)'
1469 3
1469 3
1470 2
1470 2
1471 1
1471 1
1472 $ log 'limit(0:7, 3, 6)'
1472 $ log 'limit(0:7, 3, 6)'
1473 6
1473 6
1474 7
1474 7
1475 $ log 'limit(7:0, 3, 6)'
1475 $ log 'limit(7:0, 3, 6)'
1476 1
1476 1
1477 0
1477 0
1478 $ log 'last(0:7, 2)'
1478 $ log 'last(0:7, 2)'
1479 6
1479 6
1480 7
1480 7
1481 $ hg debugrevspec -s 'limit(0:7, 3, 6)'
1481 $ hg debugrevspec -s 'limit(0:7, 3, 6)'
1482 * set:
1482 * set:
1483 <spanset+ 6:8>
1483 <spanset+ 6:8>
1484 6
1484 6
1485 7
1485 7
1486 $ hg debugrevspec -s 'limit(0:7, 3, 9)'
1486 $ hg debugrevspec -s 'limit(0:7, 3, 9)'
1487 * set:
1487 * set:
1488 <spanset+ 8:8>
1488 <spanset+ 8:8>
1489 $ hg debugrevspec -s 'limit(7:0, 3, 6)'
1489 $ hg debugrevspec -s 'limit(7:0, 3, 6)'
1490 * set:
1490 * set:
1491 <spanset- 0:2>
1491 <spanset- 0:2>
1492 1
1492 1
1493 0
1493 0
1494 $ hg debugrevspec -s 'limit(7:0, 3, 9)'
1494 $ hg debugrevspec -s 'limit(7:0, 3, 9)'
1495 * set:
1495 * set:
1496 <spanset- 0:0>
1496 <spanset- 0:0>
1497 $ hg debugrevspec -s 'limit(0:7, 0)'
1497 $ hg debugrevspec -s 'limit(0:7, 0)'
1498 * set:
1498 * set:
1499 <spanset+ 0:0>
1499 <spanset+ 0:0>
1500
1500
1501 Test order of first/last revisions
1501 Test order of first/last revisions
1502
1502
1503 $ hg debugrevspec -s 'first(4:0, 3) & 3:'
1503 $ hg debugrevspec -s 'first(4:0, 3) & 3:'
1504 * set:
1504 * set:
1505 <filteredset
1505 <filteredset
1506 <spanset- 2:5>,
1506 <spanset- 2:5>,
1507 <spanset+ 3:10>>
1507 <spanset+ 3:10>>
1508 4
1508 4
1509 3
1509 3
1510
1510
1511 $ hg debugrevspec -s '3: & first(4:0, 3)'
1511 $ hg debugrevspec -s '3: & first(4:0, 3)'
1512 * set:
1512 * set:
1513 <filteredset
1513 <filteredset
1514 <spanset+ 3:10>,
1514 <spanset+ 3:10>,
1515 <spanset- 2:5>>
1515 <spanset- 2:5>>
1516 3
1516 3
1517 4
1517 4
1518
1518
1519 $ hg debugrevspec -s 'last(4:0, 3) & :1'
1519 $ hg debugrevspec -s 'last(4:0, 3) & :1'
1520 * set:
1520 * set:
1521 <filteredset
1521 <filteredset
1522 <spanset- 0:3>,
1522 <spanset- 0:3>,
1523 <spanset+ 0:2>>
1523 <spanset+ 0:2>>
1524 1
1524 1
1525 0
1525 0
1526
1526
1527 $ hg debugrevspec -s ':1 & last(4:0, 3)'
1527 $ hg debugrevspec -s ':1 & last(4:0, 3)'
1528 * set:
1528 * set:
1529 <filteredset
1529 <filteredset
1530 <spanset+ 0:2>,
1530 <spanset+ 0:2>,
1531 <spanset+ 0:3>>
1531 <spanset+ 0:3>>
1532 0
1532 0
1533 1
1533 1
1534
1534
1535 Test scmutil.revsingle() should return the last revision
1535 Test scmutil.revsingle() should return the last revision
1536
1536
1537 $ hg debugrevspec -s 'last(0::)'
1537 $ hg debugrevspec -s 'last(0::)'
1538 * set:
1538 * set:
1539 <baseset slice=0:1
1539 <baseset slice=0:1
1540 <generatorsetasc->>
1540 <generatorsetasc->>
1541 9
1541 9
1542 $ hg identify -r '0::' --num
1542 $ hg identify -r '0::' --num
1543 9
1543 9
1544
1544
1545 Test matching
1545 Test matching
1546
1546
1547 $ log 'matching(6)'
1547 $ log 'matching(6)'
1548 6
1548 6
1549 $ log 'matching(6:7, "phase parents user date branch summary files description substate")'
1549 $ log 'matching(6:7, "phase parents user date branch summary files description substate")'
1550 6
1550 6
1551 7
1551 7
1552
1552
1553 Testing min and max
1553 Testing min and max
1554
1554
1555 max: simple
1555 max: simple
1556
1556
1557 $ log 'max(contains(a))'
1557 $ log 'max(contains(a))'
1558 5
1558 5
1559
1559
1560 max: simple on unordered set)
1560 max: simple on unordered set)
1561
1561
1562 $ log 'max((4+0+2+5+7) and contains(a))'
1562 $ log 'max((4+0+2+5+7) and contains(a))'
1563 5
1563 5
1564
1564
1565 max: no result
1565 max: no result
1566
1566
1567 $ log 'max(contains(stringthatdoesnotappearanywhere))'
1567 $ log 'max(contains(stringthatdoesnotappearanywhere))'
1568
1568
1569 max: no result on unordered set
1569 max: no result on unordered set
1570
1570
1571 $ log 'max((4+0+2+5+7) and contains(stringthatdoesnotappearanywhere))'
1571 $ log 'max((4+0+2+5+7) and contains(stringthatdoesnotappearanywhere))'
1572
1572
1573 min: simple
1573 min: simple
1574
1574
1575 $ log 'min(contains(a))'
1575 $ log 'min(contains(a))'
1576 0
1576 0
1577
1577
1578 min: simple on unordered set
1578 min: simple on unordered set
1579
1579
1580 $ log 'min((4+0+2+5+7) and contains(a))'
1580 $ log 'min((4+0+2+5+7) and contains(a))'
1581 0
1581 0
1582
1582
1583 min: empty
1583 min: empty
1584
1584
1585 $ log 'min(contains(stringthatdoesnotappearanywhere))'
1585 $ log 'min(contains(stringthatdoesnotappearanywhere))'
1586
1586
1587 min: empty on unordered set
1587 min: empty on unordered set
1588
1588
1589 $ log 'min((4+0+2+5+7) and contains(stringthatdoesnotappearanywhere))'
1589 $ log 'min((4+0+2+5+7) and contains(stringthatdoesnotappearanywhere))'
1590
1590
1591
1591
1592 $ log 'merge()'
1592 $ log 'merge()'
1593 6
1593 6
1594 $ log 'branchpoint()'
1594 $ log 'branchpoint()'
1595 1
1595 1
1596 4
1596 4
1597 $ log 'modifies(b)'
1597 $ log 'modifies(b)'
1598 4
1598 4
1599 $ log 'modifies("path:b")'
1599 $ log 'modifies("path:b")'
1600 4
1600 4
1601 $ log 'modifies("*")'
1601 $ log 'modifies("*")'
1602 4
1602 4
1603 6
1603 6
1604 $ log 'modifies("set:modified()")'
1604 $ log 'modifies("set:modified()")'
1605 4
1605 4
1606 $ log 'id(5)'
1606 $ log 'id(5)'
1607 2
1607 2
1608 $ log 'only(9)'
1608 $ log 'only(9)'
1609 8
1609 8
1610 9
1610 9
1611 $ log 'only(8)'
1611 $ log 'only(8)'
1612 8
1612 8
1613 $ log 'only(9, 5)'
1613 $ log 'only(9, 5)'
1614 2
1614 2
1615 4
1615 4
1616 8
1616 8
1617 9
1617 9
1618 $ log 'only(7 + 9, 5 + 2)'
1618 $ log 'only(7 + 9, 5 + 2)'
1619 4
1619 4
1620 6
1620 6
1621 7
1621 7
1622 8
1622 8
1623 9
1623 9
1624
1624
1625 Test empty set input
1625 Test empty set input
1626 $ log 'only(p2())'
1626 $ log 'only(p2())'
1627 $ log 'only(p1(), p2())'
1627 $ log 'only(p1(), p2())'
1628 0
1628 0
1629 1
1629 1
1630 2
1630 2
1631 4
1631 4
1632 8
1632 8
1633 9
1633 9
1634
1634
1635 Test '%' operator
1635 Test '%' operator
1636
1636
1637 $ log '9%'
1637 $ log '9%'
1638 8
1638 8
1639 9
1639 9
1640 $ log '9%5'
1640 $ log '9%5'
1641 2
1641 2
1642 4
1642 4
1643 8
1643 8
1644 9
1644 9
1645 $ log '(7 + 9)%(5 + 2)'
1645 $ log '(7 + 9)%(5 + 2)'
1646 4
1646 4
1647 6
1647 6
1648 7
1648 7
1649 8
1649 8
1650 9
1650 9
1651
1651
1652 Test operand of '%' is optimized recursively (issue4670)
1652 Test operand of '%' is optimized recursively (issue4670)
1653
1653
1654 $ try --optimize '8:9-8%'
1654 $ try --optimize '8:9-8%'
1655 (onlypost
1655 (onlypost
1656 (minus
1656 (minus
1657 (range
1657 (range
1658 (symbol '8')
1658 (symbol '8')
1659 (symbol '9'))
1659 (symbol '9'))
1660 (symbol '8')))
1660 (symbol '8')))
1661 * optimized:
1661 * optimized:
1662 (func
1662 (func
1663 (symbol 'only')
1663 (symbol 'only')
1664 (difference
1664 (difference
1665 (range
1665 (range
1666 (symbol '8')
1666 (symbol '8')
1667 (symbol '9'))
1667 (symbol '9'))
1668 (symbol '8')))
1668 (symbol '8')))
1669 * set:
1669 * set:
1670 <baseset+ [8, 9]>
1670 <baseset+ [8, 9]>
1671 8
1671 8
1672 9
1672 9
1673 $ try --optimize '(9)%(5)'
1673 $ try --optimize '(9)%(5)'
1674 (only
1674 (only
1675 (group
1675 (group
1676 (symbol '9'))
1676 (symbol '9'))
1677 (group
1677 (group
1678 (symbol '5')))
1678 (symbol '5')))
1679 * optimized:
1679 * optimized:
1680 (func
1680 (func
1681 (symbol 'only')
1681 (symbol 'only')
1682 (list
1682 (list
1683 (symbol '9')
1683 (symbol '9')
1684 (symbol '5')))
1684 (symbol '5')))
1685 * set:
1685 * set:
1686 <baseset+ [2, 4, 8, 9]>
1686 <baseset+ [2, 4, 8, 9]>
1687 2
1687 2
1688 4
1688 4
1689 8
1689 8
1690 9
1690 9
1691
1691
1692 Test the order of operations
1692 Test the order of operations
1693
1693
1694 $ log '7 + 9%5 + 2'
1694 $ log '7 + 9%5 + 2'
1695 7
1695 7
1696 2
1696 2
1697 4
1697 4
1698 8
1698 8
1699 9
1699 9
1700
1700
1701 Test explicit numeric revision
1701 Test explicit numeric revision
1702 $ log 'rev(-2)'
1702 $ log 'rev(-2)'
1703 $ log 'rev(-1)'
1703 $ log 'rev(-1)'
1704 -1
1704 -1
1705 $ log 'rev(0)'
1705 $ log 'rev(0)'
1706 0
1706 0
1707 $ log 'rev(9)'
1707 $ log 'rev(9)'
1708 9
1708 9
1709 $ log 'rev(10)'
1709 $ log 'rev(10)'
1710 $ log 'rev(tip)'
1710 $ log 'rev(tip)'
1711 hg: parse error: rev expects a number
1711 hg: parse error: rev expects a number
1712 [255]
1712 [255]
1713
1713
1714 Test hexadecimal revision
1714 Test hexadecimal revision
1715 $ log 'id(2)'
1715 $ log 'id(2)'
1716 abort: 00changelog.i@2: ambiguous identifier!
1716 abort: 00changelog.i@2: ambiguous identifier!
1717 [255]
1717 [255]
1718 $ log 'id(23268)'
1718 $ log 'id(23268)'
1719 4
1719 4
1720 $ log 'id(2785f51eece)'
1720 $ log 'id(2785f51eece)'
1721 0
1721 0
1722 $ log 'id(d5d0dcbdc4d9ff5dbb2d336f32f0bb561c1a532c)'
1722 $ log 'id(d5d0dcbdc4d9ff5dbb2d336f32f0bb561c1a532c)'
1723 8
1723 8
1724 $ log 'id(d5d0dcbdc4a)'
1724 $ log 'id(d5d0dcbdc4a)'
1725 $ log 'id(d5d0dcbdc4w)'
1725 $ log 'id(d5d0dcbdc4w)'
1726 $ log 'id(d5d0dcbdc4d9ff5dbb2d336f32f0bb561c1a532d)'
1726 $ log 'id(d5d0dcbdc4d9ff5dbb2d336f32f0bb561c1a532d)'
1727 $ log 'id(d5d0dcbdc4d9ff5dbb2d336f32f0bb561c1a532q)'
1727 $ log 'id(d5d0dcbdc4d9ff5dbb2d336f32f0bb561c1a532q)'
1728 $ log 'id(1.0)'
1728 $ log 'id(1.0)'
1729 $ log 'id(xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx)'
1729 $ log 'id(xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx)'
1730
1730
1731 Test null revision
1731 Test null revision
1732 $ log '(null)'
1732 $ log '(null)'
1733 -1
1733 -1
1734 $ log '(null:0)'
1734 $ log '(null:0)'
1735 -1
1735 -1
1736 0
1736 0
1737 $ log '(0:null)'
1737 $ log '(0:null)'
1738 0
1738 0
1739 -1
1739 -1
1740 $ log 'null::0'
1740 $ log 'null::0'
1741 -1
1741 -1
1742 0
1742 0
1743 $ log 'null:tip - 0:'
1743 $ log 'null:tip - 0:'
1744 -1
1744 -1
1745 $ log 'null: and null::' | head -1
1745 $ log 'null: and null::' | head -1
1746 -1
1746 -1
1747 $ log 'null: or 0:' | head -2
1747 $ log 'null: or 0:' | head -2
1748 -1
1748 -1
1749 0
1749 0
1750 $ log 'ancestors(null)'
1750 $ log 'ancestors(null)'
1751 -1
1751 -1
1752 $ log 'reverse(null:)' | tail -2
1752 $ log 'reverse(null:)' | tail -2
1753 0
1753 0
1754 -1
1754 -1
1755 $ log 'first(null:)'
1755 $ log 'first(null:)'
1756 -1
1756 -1
1757 $ log 'min(null:)'
1757 $ log 'min(null:)'
1758 BROKEN: should be '-1'
1758 BROKEN: should be '-1'
1759 $ log 'tip:null and all()' | tail -2
1759 $ log 'tip:null and all()' | tail -2
1760 1
1760 1
1761 0
1761 0
1762
1762
1763 Test working-directory revision
1763 Test working-directory revision
1764 $ hg debugrevspec 'wdir()'
1764 $ hg debugrevspec 'wdir()'
1765 2147483647
1765 2147483647
1766 $ hg debugrevspec 'wdir()^'
1766 $ hg debugrevspec 'wdir()^'
1767 9
1767 9
1768 $ hg up 7
1768 $ hg up 7
1769 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
1769 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
1770 $ hg debugrevspec 'wdir()^'
1770 $ hg debugrevspec 'wdir()^'
1771 7
1771 7
1772 $ hg debugrevspec 'wdir()^0'
1772 $ hg debugrevspec 'wdir()^0'
1773 2147483647
1773 2147483647
1774 $ hg debugrevspec 'wdir()~3'
1774 $ hg debugrevspec 'wdir()~3'
1775 5
1775 5
1776 $ hg debugrevspec 'ancestors(wdir())'
1776 $ hg debugrevspec 'ancestors(wdir())'
1777 0
1777 0
1778 1
1778 1
1779 2
1779 2
1780 3
1780 3
1781 4
1781 4
1782 5
1782 5
1783 6
1783 6
1784 7
1784 7
1785 2147483647
1785 2147483647
1786 $ hg debugrevspec 'wdir()~0'
1786 $ hg debugrevspec 'wdir()~0'
1787 2147483647
1787 2147483647
1788 $ hg debugrevspec 'p1(wdir())'
1788 $ hg debugrevspec 'p1(wdir())'
1789 7
1789 7
1790 $ hg debugrevspec 'p2(wdir())'
1790 $ hg debugrevspec 'p2(wdir())'
1791 $ hg debugrevspec 'parents(wdir())'
1791 $ hg debugrevspec 'parents(wdir())'
1792 7
1792 7
1793 $ hg debugrevspec 'wdir()^1'
1793 $ hg debugrevspec 'wdir()^1'
1794 7
1794 7
1795 $ hg debugrevspec 'wdir()^2'
1795 $ hg debugrevspec 'wdir()^2'
1796 $ hg debugrevspec 'wdir()^3'
1796 $ hg debugrevspec 'wdir()^3'
1797 hg: parse error: ^ expects a number 0, 1, or 2
1797 hg: parse error: ^ expects a number 0, 1, or 2
1798 [255]
1798 [255]
1799 For tests consistency
1799 For tests consistency
1800 $ hg up 9
1800 $ hg up 9
1801 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1801 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1802 $ hg debugrevspec 'tip or wdir()'
1802 $ hg debugrevspec 'tip or wdir()'
1803 9
1803 9
1804 2147483647
1804 2147483647
1805 $ hg debugrevspec '0:tip and wdir()'
1805 $ hg debugrevspec '0:tip and wdir()'
1806 $ log '0:wdir()' | tail -3
1806 $ log '0:wdir()' | tail -3
1807 8
1807 8
1808 9
1808 9
1809 2147483647
1809 2147483647
1810 $ log 'wdir():0' | head -3
1810 $ log 'wdir():0' | head -3
1811 2147483647
1811 2147483647
1812 9
1812 9
1813 8
1813 8
1814 $ log 'wdir():wdir()'
1814 $ log 'wdir():wdir()'
1815 2147483647
1815 2147483647
1816 $ log '(all() + wdir()) & min(. + wdir())'
1816 $ log '(all() + wdir()) & min(. + wdir())'
1817 9
1817 9
1818 $ log '(all() + wdir()) & max(. + wdir())'
1818 $ log '(all() + wdir()) & max(. + wdir())'
1819 2147483647
1819 2147483647
1820 $ log 'first(wdir() + .)'
1820 $ log 'first(wdir() + .)'
1821 2147483647
1821 2147483647
1822 $ log 'last(. + wdir())'
1822 $ log 'last(. + wdir())'
1823 2147483647
1823 2147483647
1824
1824
1825 Test working-directory integer revision and node id
1825 Test working-directory integer revision and node id
1826 (BUG: '0:wdir()' is still needed to populate wdir revision)
1826 (BUG: '0:wdir()' is still needed to populate wdir revision)
1827
1827
1828 $ hg debugrevspec '0:wdir() & 2147483647'
1828 $ hg debugrevspec '0:wdir() & 2147483647'
1829 2147483647
1829 2147483647
1830 $ hg debugrevspec '0:wdir() & rev(2147483647)'
1830 $ hg debugrevspec '0:wdir() & rev(2147483647)'
1831 2147483647
1831 2147483647
1832 $ hg debugrevspec '0:wdir() & ffffffffffffffffffffffffffffffffffffffff'
1832 $ hg debugrevspec '0:wdir() & ffffffffffffffffffffffffffffffffffffffff'
1833 2147483647
1833 2147483647
1834 $ hg debugrevspec '0:wdir() & ffffffffffff'
1834 $ hg debugrevspec '0:wdir() & ffffffffffff'
1835 2147483647
1835 2147483647
1836 $ hg debugrevspec '0:wdir() & id(ffffffffffffffffffffffffffffffffffffffff)'
1836 $ hg debugrevspec '0:wdir() & id(ffffffffffffffffffffffffffffffffffffffff)'
1837 2147483647
1837 2147483647
1838 $ hg debugrevspec '0:wdir() & id(ffffffffffff)'
1838 $ hg debugrevspec '0:wdir() & id(ffffffffffff)'
1839 2147483647
1839 2147483647
1840
1840
1841 $ cd ..
1841 $ cd ..
1842
1842
1843 Test short 'ff...' hash collision
1843 Test short 'ff...' hash collision
1844 (BUG: '0:wdir()' is still needed to populate wdir revision)
1844 (BUG: '0:wdir()' is still needed to populate wdir revision)
1845
1845
1846 $ hg init wdir-hashcollision
1846 $ hg init wdir-hashcollision
1847 $ cd wdir-hashcollision
1847 $ cd wdir-hashcollision
1848 $ cat <<EOF >> .hg/hgrc
1848 $ cat <<EOF >> .hg/hgrc
1849 > [experimental]
1849 > [experimental]
1850 > evolution.createmarkers=True
1850 > evolution.createmarkers=True
1851 > EOF
1851 > EOF
1852 $ echo 0 > a
1852 $ echo 0 > a
1853 $ hg ci -qAm 0
1853 $ hg ci -qAm 0
1854 $ for i in 2463 2961 6726 78127; do
1854 $ for i in 2463 2961 6726 78127; do
1855 > hg up -q 0
1855 > hg up -q 0
1856 > echo $i > a
1856 > echo $i > a
1857 > hg ci -qm $i
1857 > hg ci -qm $i
1858 > done
1858 > done
1859 $ hg up -q null
1859 $ hg up -q null
1860 $ hg log -r '0:wdir()' -T '{rev}:{node} {shortest(node, 3)}\n'
1860 $ hg log -r '0:wdir()' -T '{rev}:{node} {shortest(node, 3)}\n'
1861 0:b4e73ffab476aa0ee32ed81ca51e07169844bc6a b4e
1861 0:b4e73ffab476aa0ee32ed81ca51e07169844bc6a b4e
1862 1:fffbae3886c8fbb2114296380d276fd37715d571 fffba
1862 1:fffbae3886c8fbb2114296380d276fd37715d571 fffba
1863 2:fffb6093b00943f91034b9bdad069402c834e572 fffb6
1863 2:fffb6093b00943f91034b9bdad069402c834e572 fffb6
1864 3:fff48a9b9de34a4d64120c29548214c67980ade3 fff4
1864 3:fff48a9b9de34a4d64120c29548214c67980ade3 fff4
1865 4:ffff85cff0ff78504fcdc3c0bc10de0c65379249 ffff8
1865 4:ffff85cff0ff78504fcdc3c0bc10de0c65379249 ffff8
1866 2147483647:ffffffffffffffffffffffffffffffffffffffff fffff
1866 2147483647:ffffffffffffffffffffffffffffffffffffffff fffff
1867 $ hg debugobsolete fffbae3886c8fbb2114296380d276fd37715d571
1867 $ hg debugobsolete fffbae3886c8fbb2114296380d276fd37715d571
1868 obsoleted 1 changesets
1868 obsoleted 1 changesets
1869
1869
1870 $ hg debugrevspec '0:wdir() & fff'
1870 $ hg debugrevspec '0:wdir() & fff'
1871 abort: 00changelog.i@fff: ambiguous identifier!
1871 abort: 00changelog.i@fff: ambiguous identifier!
1872 [255]
1872 [255]
1873 $ hg debugrevspec '0:wdir() & ffff'
1873 $ hg debugrevspec '0:wdir() & ffff'
1874 abort: 00changelog.i@ffff: ambiguous identifier!
1874 abort: 00changelog.i@ffff: ambiguous identifier!
1875 [255]
1875 [255]
1876 $ hg debugrevspec '0:wdir() & fffb'
1876 $ hg debugrevspec '0:wdir() & fffb'
1877 abort: 00changelog.i@fffb: ambiguous identifier!
1877 abort: 00changelog.i@fffb: ambiguous identifier!
1878 [255]
1878 [255]
1879 BROKEN should be '2' (node lookup uses unfiltered repo since dc25ed84bee8)
1879 BROKEN should be '2' (node lookup uses unfiltered repo since dc25ed84bee8)
1880 $ hg debugrevspec '0:wdir() & id(fffb)'
1880 $ hg debugrevspec '0:wdir() & id(fffb)'
1881 2
1881 2
1882 $ hg debugrevspec '0:wdir() & ffff8'
1882 $ hg debugrevspec '0:wdir() & ffff8'
1883 4
1883 4
1884 $ hg debugrevspec '0:wdir() & fffff'
1884 $ hg debugrevspec '0:wdir() & fffff'
1885 2147483647
1885 2147483647
1886
1886
1887 $ cd ..
1887 $ cd ..
1888
1888
1889 Test branch() with wdir()
1889 Test branch() with wdir()
1890
1890
1891 $ cd repo
1891 $ cd repo
1892
1892
1893 $ log '0:wdir() & branch("literal:Γ©")'
1893 $ log '0:wdir() & branch("literal:Γ©")'
1894 8
1894 8
1895 9
1895 9
1896 2147483647
1896 2147483647
1897 $ log '0:wdir() & branch("re:Γ©")'
1897 $ log '0:wdir() & branch("re:Γ©")'
1898 8
1898 8
1899 9
1899 9
1900 2147483647
1900 2147483647
1901 $ log '0:wdir() & branch("re:^a")'
1901 $ log '0:wdir() & branch("re:^a")'
1902 0
1902 0
1903 2
1903 2
1904 $ log '0:wdir() & branch(8)'
1904 $ log '0:wdir() & branch(8)'
1905 8
1905 8
1906 9
1906 9
1907 2147483647
1907 2147483647
1908
1908
1909 branch(wdir()) returns all revisions belonging to the working branch. The wdir
1909 branch(wdir()) returns all revisions belonging to the working branch. The wdir
1910 itself isn't returned unless it is explicitly populated.
1910 itself isn't returned unless it is explicitly populated.
1911
1911
1912 $ log 'branch(wdir())'
1912 $ log 'branch(wdir())'
1913 8
1913 8
1914 9
1914 9
1915 $ log '0:wdir() & branch(wdir())'
1915 $ log '0:wdir() & branch(wdir())'
1916 8
1916 8
1917 9
1917 9
1918 2147483647
1918 2147483647
1919
1919
1920 $ log 'outgoing()'
1920 $ log 'outgoing()'
1921 8
1921 8
1922 9
1922 9
1923 $ log 'outgoing("../remote1")'
1923 $ log 'outgoing("../remote1")'
1924 8
1924 8
1925 9
1925 9
1926 $ log 'outgoing("../remote2")'
1926 $ log 'outgoing("../remote2")'
1927 3
1927 3
1928 5
1928 5
1929 6
1929 6
1930 7
1930 7
1931 9
1931 9
1932 $ log 'p1(merge())'
1932 $ log 'p1(merge())'
1933 5
1933 5
1934 $ log 'p2(merge())'
1934 $ log 'p2(merge())'
1935 4
1935 4
1936 $ log 'parents(merge())'
1936 $ log 'parents(merge())'
1937 4
1937 4
1938 5
1938 5
1939 $ log 'p1(branchpoint())'
1939 $ log 'p1(branchpoint())'
1940 0
1940 0
1941 2
1941 2
1942 $ log 'p2(branchpoint())'
1942 $ log 'p2(branchpoint())'
1943 $ log 'parents(branchpoint())'
1943 $ log 'parents(branchpoint())'
1944 0
1944 0
1945 2
1945 2
1946 $ log 'removes(a)'
1946 $ log 'removes(a)'
1947 2
1947 2
1948 6
1948 6
1949 $ log 'roots(all())'
1949 $ log 'roots(all())'
1950 0
1950 0
1951 $ log 'reverse(2 or 3 or 4 or 5)'
1951 $ log 'reverse(2 or 3 or 4 or 5)'
1952 5
1952 5
1953 4
1953 4
1954 3
1954 3
1955 2
1955 2
1956 $ log 'reverse(all())'
1956 $ log 'reverse(all())'
1957 9
1957 9
1958 8
1958 8
1959 7
1959 7
1960 6
1960 6
1961 5
1961 5
1962 4
1962 4
1963 3
1963 3
1964 2
1964 2
1965 1
1965 1
1966 0
1966 0
1967 $ log 'reverse(all()) & filelog(b)'
1967 $ log 'reverse(all()) & filelog(b)'
1968 4
1968 4
1969 1
1969 1
1970 $ log 'rev(5)'
1970 $ log 'rev(5)'
1971 5
1971 5
1972 $ log 'sort(limit(reverse(all()), 3))'
1972 $ log 'sort(limit(reverse(all()), 3))'
1973 7
1973 7
1974 8
1974 8
1975 9
1975 9
1976 $ log 'sort(2 or 3 or 4 or 5, date)'
1976 $ log 'sort(2 or 3 or 4 or 5, date)'
1977 2
1977 2
1978 3
1978 3
1979 5
1979 5
1980 4
1980 4
1981 $ log 'tagged()'
1981 $ log 'tagged()'
1982 6
1982 6
1983 $ log 'tag()'
1983 $ log 'tag()'
1984 6
1984 6
1985 $ log 'tag(1.0)'
1985 $ log 'tag(1.0)'
1986 6
1986 6
1987 $ log 'tag(tip)'
1987 $ log 'tag(tip)'
1988 9
1988 9
1989
1989
1990 Test order of revisions in compound expression
1990 Test order of revisions in compound expression
1991 ----------------------------------------------
1991 ----------------------------------------------
1992
1992
1993 The general rule is that only the outermost (= leftmost) predicate can
1993 The general rule is that only the outermost (= leftmost) predicate can
1994 enforce its ordering requirement. The other predicates should take the
1994 enforce its ordering requirement. The other predicates should take the
1995 ordering defined by it.
1995 ordering defined by it.
1996
1996
1997 'A & B' should follow the order of 'A':
1997 'A & B' should follow the order of 'A':
1998
1998
1999 $ log '2:0 & 0::2'
1999 $ log '2:0 & 0::2'
2000 2
2000 2
2001 1
2001 1
2002 0
2002 0
2003
2003
2004 'head()' combines sets in right order:
2004 'head()' combines sets in right order:
2005
2005
2006 $ log '2:0 & head()'
2006 $ log '2:0 & head()'
2007 2
2007 2
2008 1
2008 1
2009 0
2009 0
2010
2010
2011 'x:y' takes ordering parameter into account:
2011 'x:y' takes ordering parameter into account:
2012
2012
2013 $ try -p optimized '3:0 & 0:3 & not 2:1'
2013 $ try -p optimized '3:0 & 0:3 & not 2:1'
2014 * optimized:
2014 * optimized:
2015 (difference
2015 (difference
2016 (and
2016 (and
2017 (range
2017 (range
2018 (symbol '3')
2018 (symbol '3')
2019 (symbol '0'))
2019 (symbol '0'))
2020 (range
2020 (range
2021 (symbol '0')
2021 (symbol '0')
2022 (symbol '3')))
2022 (symbol '3')))
2023 (range
2023 (range
2024 (symbol '2')
2024 (symbol '2')
2025 (symbol '1')))
2025 (symbol '1')))
2026 * set:
2026 * set:
2027 <filteredset
2027 <filteredset
2028 <filteredset
2028 <filteredset
2029 <spanset- 0:4>,
2029 <spanset- 0:4>,
2030 <spanset+ 0:4>>,
2030 <spanset+ 0:4>>,
2031 <not
2031 <not
2032 <spanset+ 1:3>>>
2032 <spanset+ 1:3>>>
2033 3
2033 3
2034 0
2034 0
2035
2035
2036 'a + b', which is optimized to '_list(a b)', should take the ordering of
2036 'a + b', which is optimized to '_list(a b)', should take the ordering of
2037 the left expression:
2037 the left expression:
2038
2038
2039 $ try --optimize '2:0 & (0 + 1 + 2)'
2039 $ try --optimize '2:0 & (0 + 1 + 2)'
2040 (and
2040 (and
2041 (range
2041 (range
2042 (symbol '2')
2042 (symbol '2')
2043 (symbol '0'))
2043 (symbol '0'))
2044 (group
2044 (group
2045 (or
2045 (or
2046 (list
2046 (list
2047 (symbol '0')
2047 (symbol '0')
2048 (symbol '1')
2048 (symbol '1')
2049 (symbol '2')))))
2049 (symbol '2')))))
2050 * optimized:
2050 * optimized:
2051 (and
2051 (and
2052 (range
2052 (range
2053 (symbol '2')
2053 (symbol '2')
2054 (symbol '0'))
2054 (symbol '0'))
2055 (func
2055 (func
2056 (symbol '_list')
2056 (symbol '_list')
2057 (string '0\x001\x002')))
2057 (string '0\x001\x002')))
2058 * set:
2058 * set:
2059 <filteredset
2059 <filteredset
2060 <spanset- 0:3>,
2060 <spanset- 0:3>,
2061 <baseset [0, 1, 2]>>
2061 <baseset [0, 1, 2]>>
2062 2
2062 2
2063 1
2063 1
2064 0
2064 0
2065
2065
2066 'A + B' should take the ordering of the left expression:
2066 'A + B' should take the ordering of the left expression:
2067
2067
2068 $ try --optimize '2:0 & (0:1 + 2)'
2068 $ try --optimize '2:0 & (0:1 + 2)'
2069 (and
2069 (and
2070 (range
2070 (range
2071 (symbol '2')
2071 (symbol '2')
2072 (symbol '0'))
2072 (symbol '0'))
2073 (group
2073 (group
2074 (or
2074 (or
2075 (list
2075 (list
2076 (range
2076 (range
2077 (symbol '0')
2077 (symbol '0')
2078 (symbol '1'))
2078 (symbol '1'))
2079 (symbol '2')))))
2079 (symbol '2')))))
2080 * optimized:
2080 * optimized:
2081 (and
2081 (and
2082 (range
2082 (range
2083 (symbol '2')
2083 (symbol '2')
2084 (symbol '0'))
2084 (symbol '0'))
2085 (or
2085 (or
2086 (list
2086 (list
2087 (range
2087 (range
2088 (symbol '0')
2088 (symbol '0')
2089 (symbol '1'))
2089 (symbol '1'))
2090 (symbol '2'))))
2090 (symbol '2'))))
2091 * set:
2091 * set:
2092 <filteredset
2092 <filteredset
2093 <spanset- 0:3>,
2093 <spanset- 0:3>,
2094 <addset
2094 <addset
2095 <spanset+ 0:2>,
2095 <spanset+ 0:2>,
2096 <baseset [2]>>>
2096 <baseset [2]>>>
2097 2
2097 2
2098 1
2098 1
2099 0
2099 0
2100
2100
2101 '_intlist(a b)' should behave like 'a + b':
2101 '_intlist(a b)' should behave like 'a + b':
2102
2102
2103 $ trylist --optimize '2:0 & %ld' 0 1 2
2103 $ trylist --optimize '2:0 & %ld' 0 1 2
2104 (and
2104 (and
2105 (range
2105 (range
2106 (symbol '2')
2106 (symbol '2')
2107 (symbol '0'))
2107 (symbol '0'))
2108 (func
2108 (func
2109 (symbol '_intlist')
2109 (symbol '_intlist')
2110 (string '0\x001\x002')))
2110 (string '0\x001\x002')))
2111 * optimized:
2111 * optimized:
2112 (andsmally
2112 (andsmally
2113 (range
2113 (range
2114 (symbol '2')
2114 (symbol '2')
2115 (symbol '0'))
2115 (symbol '0'))
2116 (func
2116 (func
2117 (symbol '_intlist')
2117 (symbol '_intlist')
2118 (string '0\x001\x002')))
2118 (string '0\x001\x002')))
2119 * set:
2119 * set:
2120 <filteredset
2120 <filteredset
2121 <spanset- 0:3>,
2121 <spanset- 0:3>,
2122 <baseset+ [0, 1, 2]>>
2122 <baseset+ [0, 1, 2]>>
2123 2
2123 2
2124 1
2124 1
2125 0
2125 0
2126
2126
2127 $ trylist --optimize '%ld & 2:0' 0 2 1
2127 $ trylist --optimize '%ld & 2:0' 0 2 1
2128 (and
2128 (and
2129 (func
2129 (func
2130 (symbol '_intlist')
2130 (symbol '_intlist')
2131 (string '0\x002\x001'))
2131 (string '0\x002\x001'))
2132 (range
2132 (range
2133 (symbol '2')
2133 (symbol '2')
2134 (symbol '0')))
2134 (symbol '0')))
2135 * optimized:
2135 * optimized:
2136 (and
2136 (and
2137 (func
2137 (func
2138 (symbol '_intlist')
2138 (symbol '_intlist')
2139 (string '0\x002\x001'))
2139 (string '0\x002\x001'))
2140 (range
2140 (range
2141 (symbol '2')
2141 (symbol '2')
2142 (symbol '0')))
2142 (symbol '0')))
2143 * set:
2143 * set:
2144 <filteredset
2144 <filteredset
2145 <baseset [0, 2, 1]>,
2145 <baseset [0, 2, 1]>,
2146 <spanset- 0:3>>
2146 <spanset- 0:3>>
2147 0
2147 0
2148 2
2148 2
2149 1
2149 1
2150
2150
2151 '_hexlist(a b)' should behave like 'a + b':
2151 '_hexlist(a b)' should behave like 'a + b':
2152
2152
2153 $ trylist --optimize --bin '2:0 & %ln' `hg log -T '{node} ' -r0:2`
2153 $ trylist --optimize --bin '2:0 & %ln' `hg log -T '{node} ' -r0:2`
2154 (and
2154 (and
2155 (range
2155 (range
2156 (symbol '2')
2156 (symbol '2')
2157 (symbol '0'))
2157 (symbol '0'))
2158 (func
2158 (func
2159 (symbol '_hexlist')
2159 (symbol '_hexlist')
2160 (string '*'))) (glob)
2160 (string '*'))) (glob)
2161 * optimized:
2161 * optimized:
2162 (and
2162 (and
2163 (range
2163 (range
2164 (symbol '2')
2164 (symbol '2')
2165 (symbol '0'))
2165 (symbol '0'))
2166 (func
2166 (func
2167 (symbol '_hexlist')
2167 (symbol '_hexlist')
2168 (string '*'))) (glob)
2168 (string '*'))) (glob)
2169 * set:
2169 * set:
2170 <filteredset
2170 <filteredset
2171 <spanset- 0:3>,
2171 <spanset- 0:3>,
2172 <baseset [0, 1, 2]>>
2172 <baseset [0, 1, 2]>>
2173 2
2173 2
2174 1
2174 1
2175 0
2175 0
2176
2176
2177 $ trylist --optimize --bin '%ln & 2:0' `hg log -T '{node} ' -r0+2+1`
2177 $ trylist --optimize --bin '%ln & 2:0' `hg log -T '{node} ' -r0+2+1`
2178 (and
2178 (and
2179 (func
2179 (func
2180 (symbol '_hexlist')
2180 (symbol '_hexlist')
2181 (string '*')) (glob)
2181 (string '*')) (glob)
2182 (range
2182 (range
2183 (symbol '2')
2183 (symbol '2')
2184 (symbol '0')))
2184 (symbol '0')))
2185 * optimized:
2185 * optimized:
2186 (andsmally
2186 (andsmally
2187 (func
2187 (func
2188 (symbol '_hexlist')
2188 (symbol '_hexlist')
2189 (string '*')) (glob)
2189 (string '*')) (glob)
2190 (range
2190 (range
2191 (symbol '2')
2191 (symbol '2')
2192 (symbol '0')))
2192 (symbol '0')))
2193 * set:
2193 * set:
2194 <baseset [0, 2, 1]>
2194 <baseset [0, 2, 1]>
2195 0
2195 0
2196 2
2196 2
2197 1
2197 1
2198
2198
2199 '_list' should not go through the slow follow-order path if order doesn't
2199 '_list' should not go through the slow follow-order path if order doesn't
2200 matter:
2200 matter:
2201
2201
2202 $ try -p optimized '2:0 & not (0 + 1)'
2202 $ try -p optimized '2:0 & not (0 + 1)'
2203 * optimized:
2203 * optimized:
2204 (difference
2204 (difference
2205 (range
2205 (range
2206 (symbol '2')
2206 (symbol '2')
2207 (symbol '0'))
2207 (symbol '0'))
2208 (func
2208 (func
2209 (symbol '_list')
2209 (symbol '_list')
2210 (string '0\x001')))
2210 (string '0\x001')))
2211 * set:
2211 * set:
2212 <filteredset
2212 <filteredset
2213 <spanset- 0:3>,
2213 <spanset- 0:3>,
2214 <not
2214 <not
2215 <baseset [0, 1]>>>
2215 <baseset [0, 1]>>>
2216 2
2216 2
2217
2217
2218 $ try -p optimized '2:0 & not (0:2 & (0 + 1))'
2218 $ try -p optimized '2:0 & not (0:2 & (0 + 1))'
2219 * optimized:
2219 * optimized:
2220 (difference
2220 (difference
2221 (range
2221 (range
2222 (symbol '2')
2222 (symbol '2')
2223 (symbol '0'))
2223 (symbol '0'))
2224 (and
2224 (and
2225 (range
2225 (range
2226 (symbol '0')
2226 (symbol '0')
2227 (symbol '2'))
2227 (symbol '2'))
2228 (func
2228 (func
2229 (symbol '_list')
2229 (symbol '_list')
2230 (string '0\x001'))))
2230 (string '0\x001'))))
2231 * set:
2231 * set:
2232 <filteredset
2232 <filteredset
2233 <spanset- 0:3>,
2233 <spanset- 0:3>,
2234 <not
2234 <not
2235 <baseset [0, 1]>>>
2235 <baseset [0, 1]>>>
2236 2
2236 2
2237
2237
2238 because 'present()' does nothing other than suppressing an error, the
2238 because 'present()' does nothing other than suppressing an error, the
2239 ordering requirement should be forwarded to the nested expression
2239 ordering requirement should be forwarded to the nested expression
2240
2240
2241 $ try -p optimized 'present(2 + 0 + 1)'
2241 $ try -p optimized 'present(2 + 0 + 1)'
2242 * optimized:
2242 * optimized:
2243 (func
2243 (func
2244 (symbol 'present')
2244 (symbol 'present')
2245 (func
2245 (func
2246 (symbol '_list')
2246 (symbol '_list')
2247 (string '2\x000\x001')))
2247 (string '2\x000\x001')))
2248 * set:
2248 * set:
2249 <baseset [2, 0, 1]>
2249 <baseset [2, 0, 1]>
2250 2
2250 2
2251 0
2251 0
2252 1
2252 1
2253
2253
2254 $ try --optimize '2:0 & present(0 + 1 + 2)'
2254 $ try --optimize '2:0 & present(0 + 1 + 2)'
2255 (and
2255 (and
2256 (range
2256 (range
2257 (symbol '2')
2257 (symbol '2')
2258 (symbol '0'))
2258 (symbol '0'))
2259 (func
2259 (func
2260 (symbol 'present')
2260 (symbol 'present')
2261 (or
2261 (or
2262 (list
2262 (list
2263 (symbol '0')
2263 (symbol '0')
2264 (symbol '1')
2264 (symbol '1')
2265 (symbol '2')))))
2265 (symbol '2')))))
2266 * optimized:
2266 * optimized:
2267 (and
2267 (and
2268 (range
2268 (range
2269 (symbol '2')
2269 (symbol '2')
2270 (symbol '0'))
2270 (symbol '0'))
2271 (func
2271 (func
2272 (symbol 'present')
2272 (symbol 'present')
2273 (func
2273 (func
2274 (symbol '_list')
2274 (symbol '_list')
2275 (string '0\x001\x002'))))
2275 (string '0\x001\x002'))))
2276 * set:
2276 * set:
2277 <filteredset
2277 <filteredset
2278 <spanset- 0:3>,
2278 <spanset- 0:3>,
2279 <baseset [0, 1, 2]>>
2279 <baseset [0, 1, 2]>>
2280 2
2280 2
2281 1
2281 1
2282 0
2282 0
2283
2283
2284 'reverse()' should take effect only if it is the outermost expression:
2284 'reverse()' should take effect only if it is the outermost expression:
2285
2285
2286 $ try --optimize '0:2 & reverse(all())'
2286 $ try --optimize '0:2 & reverse(all())'
2287 (and
2287 (and
2288 (range
2288 (range
2289 (symbol '0')
2289 (symbol '0')
2290 (symbol '2'))
2290 (symbol '2'))
2291 (func
2291 (func
2292 (symbol 'reverse')
2292 (symbol 'reverse')
2293 (func
2293 (func
2294 (symbol 'all')
2294 (symbol 'all')
2295 None)))
2295 None)))
2296 * optimized:
2296 * optimized:
2297 (and
2297 (and
2298 (range
2298 (range
2299 (symbol '0')
2299 (symbol '0')
2300 (symbol '2'))
2300 (symbol '2'))
2301 (func
2301 (func
2302 (symbol 'reverse')
2302 (symbol 'reverse')
2303 (func
2303 (func
2304 (symbol 'all')
2304 (symbol 'all')
2305 None)))
2305 None)))
2306 * set:
2306 * set:
2307 <filteredset
2307 <filteredset
2308 <spanset+ 0:3>,
2308 <spanset+ 0:3>,
2309 <spanset+ 0:10>>
2309 <spanset+ 0:10>>
2310 0
2310 0
2311 1
2311 1
2312 2
2312 2
2313
2313
2314 'sort()' should take effect only if it is the outermost expression:
2314 'sort()' should take effect only if it is the outermost expression:
2315
2315
2316 $ try --optimize '0:2 & sort(all(), -rev)'
2316 $ try --optimize '0:2 & sort(all(), -rev)'
2317 (and
2317 (and
2318 (range
2318 (range
2319 (symbol '0')
2319 (symbol '0')
2320 (symbol '2'))
2320 (symbol '2'))
2321 (func
2321 (func
2322 (symbol 'sort')
2322 (symbol 'sort')
2323 (list
2323 (list
2324 (func
2324 (func
2325 (symbol 'all')
2325 (symbol 'all')
2326 None)
2326 None)
2327 (negate
2327 (negate
2328 (symbol 'rev')))))
2328 (symbol 'rev')))))
2329 * optimized:
2329 * optimized:
2330 (and
2330 (and
2331 (range
2331 (range
2332 (symbol '0')
2332 (symbol '0')
2333 (symbol '2'))
2333 (symbol '2'))
2334 (func
2334 (func
2335 (symbol 'sort')
2335 (symbol 'sort')
2336 (list
2336 (list
2337 (func
2337 (func
2338 (symbol 'all')
2338 (symbol 'all')
2339 None)
2339 None)
2340 (string '-rev'))))
2340 (string '-rev'))))
2341 * set:
2341 * set:
2342 <filteredset
2342 <filteredset
2343 <spanset+ 0:3>,
2343 <spanset+ 0:3>,
2344 <spanset+ 0:10>>
2344 <spanset+ 0:10>>
2345 0
2345 0
2346 1
2346 1
2347 2
2347 2
2348
2348
2349 invalid argument passed to noop sort():
2349 invalid argument passed to noop sort():
2350
2350
2351 $ log '0:2 & sort()'
2351 $ log '0:2 & sort()'
2352 hg: parse error: sort requires one or two arguments
2352 hg: parse error: sort requires one or two arguments
2353 [255]
2353 [255]
2354 $ log '0:2 & sort(all(), -invalid)'
2354 $ log '0:2 & sort(all(), -invalid)'
2355 hg: parse error: unknown sort key '-invalid'
2355 hg: parse error: unknown sort key '-invalid'
2356 [255]
2356 [255]
2357
2357
2358 for 'A & f(B)', 'B' should not be affected by the order of 'A':
2358 for 'A & f(B)', 'B' should not be affected by the order of 'A':
2359
2359
2360 $ try --optimize '2:0 & first(1 + 0 + 2)'
2360 $ try --optimize '2:0 & first(1 + 0 + 2)'
2361 (and
2361 (and
2362 (range
2362 (range
2363 (symbol '2')
2363 (symbol '2')
2364 (symbol '0'))
2364 (symbol '0'))
2365 (func
2365 (func
2366 (symbol 'first')
2366 (symbol 'first')
2367 (or
2367 (or
2368 (list
2368 (list
2369 (symbol '1')
2369 (symbol '1')
2370 (symbol '0')
2370 (symbol '0')
2371 (symbol '2')))))
2371 (symbol '2')))))
2372 * optimized:
2372 * optimized:
2373 (and
2373 (and
2374 (range
2374 (range
2375 (symbol '2')
2375 (symbol '2')
2376 (symbol '0'))
2376 (symbol '0'))
2377 (func
2377 (func
2378 (symbol 'first')
2378 (symbol 'first')
2379 (func
2379 (func
2380 (symbol '_list')
2380 (symbol '_list')
2381 (string '1\x000\x002'))))
2381 (string '1\x000\x002'))))
2382 * set:
2382 * set:
2383 <filteredset
2383 <filteredset
2384 <baseset [1]>,
2384 <baseset [1]>,
2385 <spanset- 0:3>>
2385 <spanset- 0:3>>
2386 1
2386 1
2387
2387
2388 $ try --optimize '2:0 & not last(0 + 2 + 1)'
2388 $ try --optimize '2:0 & not last(0 + 2 + 1)'
2389 (and
2389 (and
2390 (range
2390 (range
2391 (symbol '2')
2391 (symbol '2')
2392 (symbol '0'))
2392 (symbol '0'))
2393 (not
2393 (not
2394 (func
2394 (func
2395 (symbol 'last')
2395 (symbol 'last')
2396 (or
2396 (or
2397 (list
2397 (list
2398 (symbol '0')
2398 (symbol '0')
2399 (symbol '2')
2399 (symbol '2')
2400 (symbol '1'))))))
2400 (symbol '1'))))))
2401 * optimized:
2401 * optimized:
2402 (difference
2402 (difference
2403 (range
2403 (range
2404 (symbol '2')
2404 (symbol '2')
2405 (symbol '0'))
2405 (symbol '0'))
2406 (func
2406 (func
2407 (symbol 'last')
2407 (symbol 'last')
2408 (func
2408 (func
2409 (symbol '_list')
2409 (symbol '_list')
2410 (string '0\x002\x001'))))
2410 (string '0\x002\x001'))))
2411 * set:
2411 * set:
2412 <filteredset
2412 <filteredset
2413 <spanset- 0:3>,
2413 <spanset- 0:3>,
2414 <not
2414 <not
2415 <baseset [1]>>>
2415 <baseset [1]>>>
2416 2
2416 2
2417 0
2417 0
2418
2418
2419 for 'A & (op)(B)', 'B' should not be affected by the order of 'A':
2419 for 'A & (op)(B)', 'B' should not be affected by the order of 'A':
2420
2420
2421 $ try --optimize '2:0 & (1 + 0 + 2):(0 + 2 + 1)'
2421 $ try --optimize '2:0 & (1 + 0 + 2):(0 + 2 + 1)'
2422 (and
2422 (and
2423 (range
2423 (range
2424 (symbol '2')
2424 (symbol '2')
2425 (symbol '0'))
2425 (symbol '0'))
2426 (range
2426 (range
2427 (group
2427 (group
2428 (or
2428 (or
2429 (list
2429 (list
2430 (symbol '1')
2430 (symbol '1')
2431 (symbol '0')
2431 (symbol '0')
2432 (symbol '2'))))
2432 (symbol '2'))))
2433 (group
2433 (group
2434 (or
2434 (or
2435 (list
2435 (list
2436 (symbol '0')
2436 (symbol '0')
2437 (symbol '2')
2437 (symbol '2')
2438 (symbol '1'))))))
2438 (symbol '1'))))))
2439 * optimized:
2439 * optimized:
2440 (and
2440 (and
2441 (range
2441 (range
2442 (symbol '2')
2442 (symbol '2')
2443 (symbol '0'))
2443 (symbol '0'))
2444 (range
2444 (range
2445 (func
2445 (func
2446 (symbol '_list')
2446 (symbol '_list')
2447 (string '1\x000\x002'))
2447 (string '1\x000\x002'))
2448 (func
2448 (func
2449 (symbol '_list')
2449 (symbol '_list')
2450 (string '0\x002\x001'))))
2450 (string '0\x002\x001'))))
2451 * set:
2451 * set:
2452 <filteredset
2452 <filteredset
2453 <spanset- 0:3>,
2453 <spanset- 0:3>,
2454 <baseset [1]>>
2454 <baseset [1]>>
2455 1
2455 1
2456
2456
2457 'A & B' can be rewritten as 'flipand(B, A)' by weight.
2457 'A & B' can be rewritten as 'flipand(B, A)' by weight.
2458
2458
2459 $ try --optimize 'contains("glob:*") & (2 + 0 + 1)'
2459 $ try --optimize 'contains("glob:*") & (2 + 0 + 1)'
2460 (and
2460 (and
2461 (func
2461 (func
2462 (symbol 'contains')
2462 (symbol 'contains')
2463 (string 'glob:*'))
2463 (string 'glob:*'))
2464 (group
2464 (group
2465 (or
2465 (or
2466 (list
2466 (list
2467 (symbol '2')
2467 (symbol '2')
2468 (symbol '0')
2468 (symbol '0')
2469 (symbol '1')))))
2469 (symbol '1')))))
2470 * optimized:
2470 * optimized:
2471 (andsmally
2471 (andsmally
2472 (func
2472 (func
2473 (symbol 'contains')
2473 (symbol 'contains')
2474 (string 'glob:*'))
2474 (string 'glob:*'))
2475 (func
2475 (func
2476 (symbol '_list')
2476 (symbol '_list')
2477 (string '2\x000\x001')))
2477 (string '2\x000\x001')))
2478 * set:
2478 * set:
2479 <filteredset
2479 <filteredset
2480 <baseset+ [0, 1, 2]>,
2480 <baseset+ [0, 1, 2]>,
2481 <contains 'glob:*'>>
2481 <contains 'glob:*'>>
2482 0
2482 0
2483 1
2483 1
2484 2
2484 2
2485
2485
2486 and in this example, 'A & B' is rewritten as 'B & A', but 'A' overrides
2486 and in this example, 'A & B' is rewritten as 'B & A', but 'A' overrides
2487 the order appropriately:
2487 the order appropriately:
2488
2488
2489 $ try --optimize 'reverse(contains("glob:*")) & (0 + 2 + 1)'
2489 $ try --optimize 'reverse(contains("glob:*")) & (0 + 2 + 1)'
2490 (and
2490 (and
2491 (func
2491 (func
2492 (symbol 'reverse')
2492 (symbol 'reverse')
2493 (func
2493 (func
2494 (symbol 'contains')
2494 (symbol 'contains')
2495 (string 'glob:*')))
2495 (string 'glob:*')))
2496 (group
2496 (group
2497 (or
2497 (or
2498 (list
2498 (list
2499 (symbol '0')
2499 (symbol '0')
2500 (symbol '2')
2500 (symbol '2')
2501 (symbol '1')))))
2501 (symbol '1')))))
2502 * optimized:
2502 * optimized:
2503 (andsmally
2503 (andsmally
2504 (func
2504 (func
2505 (symbol 'reverse')
2505 (symbol 'reverse')
2506 (func
2506 (func
2507 (symbol 'contains')
2507 (symbol 'contains')
2508 (string 'glob:*')))
2508 (string 'glob:*')))
2509 (func
2509 (func
2510 (symbol '_list')
2510 (symbol '_list')
2511 (string '0\x002\x001')))
2511 (string '0\x002\x001')))
2512 * set:
2512 * set:
2513 <filteredset
2513 <filteredset
2514 <baseset- [0, 1, 2]>,
2514 <baseset- [0, 1, 2]>,
2515 <contains 'glob:*'>>
2515 <contains 'glob:*'>>
2516 2
2516 2
2517 1
2517 1
2518 0
2518 0
2519
2519
2520 test sort revset
2520 test sort revset
2521 --------------------------------------------
2521 --------------------------------------------
2522
2522
2523 test when adding two unordered revsets
2523 test when adding two unordered revsets
2524
2524
2525 $ log 'sort(keyword(issue) or modifies(b))'
2525 $ log 'sort(keyword(issue) or modifies(b))'
2526 4
2526 4
2527 6
2527 6
2528
2528
2529 test when sorting a reversed collection in the same way it is
2529 test when sorting a reversed collection in the same way it is
2530
2530
2531 $ log 'sort(reverse(all()), -rev)'
2531 $ log 'sort(reverse(all()), -rev)'
2532 9
2532 9
2533 8
2533 8
2534 7
2534 7
2535 6
2535 6
2536 5
2536 5
2537 4
2537 4
2538 3
2538 3
2539 2
2539 2
2540 1
2540 1
2541 0
2541 0
2542
2542
2543 test when sorting a reversed collection
2543 test when sorting a reversed collection
2544
2544
2545 $ log 'sort(reverse(all()), rev)'
2545 $ log 'sort(reverse(all()), rev)'
2546 0
2546 0
2547 1
2547 1
2548 2
2548 2
2549 3
2549 3
2550 4
2550 4
2551 5
2551 5
2552 6
2552 6
2553 7
2553 7
2554 8
2554 8
2555 9
2555 9
2556
2556
2557
2557
2558 test sorting two sorted collections in different orders
2558 test sorting two sorted collections in different orders
2559
2559
2560 $ log 'sort(outgoing() or reverse(removes(a)), rev)'
2560 $ log 'sort(outgoing() or reverse(removes(a)), rev)'
2561 2
2561 2
2562 6
2562 6
2563 8
2563 8
2564 9
2564 9
2565
2565
2566 test sorting two sorted collections in different orders backwards
2566 test sorting two sorted collections in different orders backwards
2567
2567
2568 $ log 'sort(outgoing() or reverse(removes(a)), -rev)'
2568 $ log 'sort(outgoing() or reverse(removes(a)), -rev)'
2569 9
2569 9
2570 8
2570 8
2571 6
2571 6
2572 2
2572 2
2573
2573
2574 test empty sort key which is noop
2574 test empty sort key which is noop
2575
2575
2576 $ log 'sort(0 + 2 + 1, "")'
2576 $ log 'sort(0 + 2 + 1, "")'
2577 0
2577 0
2578 2
2578 2
2579 1
2579 1
2580
2580
2581 test invalid sort keys
2581 test invalid sort keys
2582
2582
2583 $ log 'sort(all(), -invalid)'
2583 $ log 'sort(all(), -invalid)'
2584 hg: parse error: unknown sort key '-invalid'
2584 hg: parse error: unknown sort key '-invalid'
2585 [255]
2585 [255]
2586
2586
2587 $ cd ..
2587 $ cd ..
2588
2588
2589 test sorting by multiple keys including variable-length strings
2589 test sorting by multiple keys including variable-length strings
2590
2590
2591 $ hg init sorting
2591 $ hg init sorting
2592 $ cd sorting
2592 $ cd sorting
2593 $ cat <<EOF >> .hg/hgrc
2593 $ cat <<EOF >> .hg/hgrc
2594 > [ui]
2594 > [ui]
2595 > logtemplate = '{rev} {branch|p5}{desc|p5}{author|p5}{date|hgdate}\n'
2595 > logtemplate = '{rev} {branch|p5}{desc|p5}{author|p5}{date|hgdate}\n'
2596 > [templatealias]
2596 > [templatealias]
2597 > p5(s) = pad(s, 5)
2597 > p5(s) = pad(s, 5)
2598 > EOF
2598 > EOF
2599 $ hg branch -qf b12
2599 $ hg branch -qf b12
2600 $ hg ci -m m111 -u u112 -d '111 10800'
2600 $ hg ci -m m111 -u u112 -d '111 10800'
2601 $ hg branch -qf b11
2601 $ hg branch -qf b11
2602 $ hg ci -m m12 -u u111 -d '112 7200'
2602 $ hg ci -m m12 -u u111 -d '112 7200'
2603 $ hg branch -qf b111
2603 $ hg branch -qf b111
2604 $ hg ci -m m11 -u u12 -d '111 3600'
2604 $ hg ci -m m11 -u u12 -d '111 3600'
2605 $ hg branch -qf b112
2605 $ hg branch -qf b112
2606 $ hg ci -m m111 -u u11 -d '120 0'
2606 $ hg ci -m m111 -u u11 -d '120 0'
2607 $ hg branch -qf b111
2607 $ hg branch -qf b111
2608 $ hg ci -m m112 -u u111 -d '110 14400'
2608 $ hg ci -m m112 -u u111 -d '110 14400'
2609 created new head
2609 created new head
2610
2610
2611 compare revisions (has fast path):
2611 compare revisions (has fast path):
2612
2612
2613 $ hg log -r 'sort(all(), rev)'
2613 $ hg log -r 'sort(all(), rev)'
2614 0 b12 m111 u112 111 10800
2614 0 b12 m111 u112 111 10800
2615 1 b11 m12 u111 112 7200
2615 1 b11 m12 u111 112 7200
2616 2 b111 m11 u12 111 3600
2616 2 b111 m11 u12 111 3600
2617 3 b112 m111 u11 120 0
2617 3 b112 m111 u11 120 0
2618 4 b111 m112 u111 110 14400
2618 4 b111 m112 u111 110 14400
2619
2619
2620 $ hg log -r 'sort(all(), -rev)'
2620 $ hg log -r 'sort(all(), -rev)'
2621 4 b111 m112 u111 110 14400
2621 4 b111 m112 u111 110 14400
2622 3 b112 m111 u11 120 0
2622 3 b112 m111 u11 120 0
2623 2 b111 m11 u12 111 3600
2623 2 b111 m11 u12 111 3600
2624 1 b11 m12 u111 112 7200
2624 1 b11 m12 u111 112 7200
2625 0 b12 m111 u112 111 10800
2625 0 b12 m111 u112 111 10800
2626
2626
2627 compare variable-length strings (issue5218):
2627 compare variable-length strings (issue5218):
2628
2628
2629 $ hg log -r 'sort(all(), branch)'
2629 $ hg log -r 'sort(all(), branch)'
2630 1 b11 m12 u111 112 7200
2630 1 b11 m12 u111 112 7200
2631 2 b111 m11 u12 111 3600
2631 2 b111 m11 u12 111 3600
2632 4 b111 m112 u111 110 14400
2632 4 b111 m112 u111 110 14400
2633 3 b112 m111 u11 120 0
2633 3 b112 m111 u11 120 0
2634 0 b12 m111 u112 111 10800
2634 0 b12 m111 u112 111 10800
2635
2635
2636 $ hg log -r 'sort(all(), -branch)'
2636 $ hg log -r 'sort(all(), -branch)'
2637 0 b12 m111 u112 111 10800
2637 0 b12 m111 u112 111 10800
2638 3 b112 m111 u11 120 0
2638 3 b112 m111 u11 120 0
2639 2 b111 m11 u12 111 3600
2639 2 b111 m11 u12 111 3600
2640 4 b111 m112 u111 110 14400
2640 4 b111 m112 u111 110 14400
2641 1 b11 m12 u111 112 7200
2641 1 b11 m12 u111 112 7200
2642
2642
2643 $ hg log -r 'sort(all(), desc)'
2643 $ hg log -r 'sort(all(), desc)'
2644 2 b111 m11 u12 111 3600
2644 2 b111 m11 u12 111 3600
2645 0 b12 m111 u112 111 10800
2645 0 b12 m111 u112 111 10800
2646 3 b112 m111 u11 120 0
2646 3 b112 m111 u11 120 0
2647 4 b111 m112 u111 110 14400
2647 4 b111 m112 u111 110 14400
2648 1 b11 m12 u111 112 7200
2648 1 b11 m12 u111 112 7200
2649
2649
2650 $ hg log -r 'sort(all(), -desc)'
2650 $ hg log -r 'sort(all(), -desc)'
2651 1 b11 m12 u111 112 7200
2651 1 b11 m12 u111 112 7200
2652 4 b111 m112 u111 110 14400
2652 4 b111 m112 u111 110 14400
2653 0 b12 m111 u112 111 10800
2653 0 b12 m111 u112 111 10800
2654 3 b112 m111 u11 120 0
2654 3 b112 m111 u11 120 0
2655 2 b111 m11 u12 111 3600
2655 2 b111 m11 u12 111 3600
2656
2656
2657 $ hg log -r 'sort(all(), user)'
2657 $ hg log -r 'sort(all(), user)'
2658 3 b112 m111 u11 120 0
2658 3 b112 m111 u11 120 0
2659 1 b11 m12 u111 112 7200
2659 1 b11 m12 u111 112 7200
2660 4 b111 m112 u111 110 14400
2660 4 b111 m112 u111 110 14400
2661 0 b12 m111 u112 111 10800
2661 0 b12 m111 u112 111 10800
2662 2 b111 m11 u12 111 3600
2662 2 b111 m11 u12 111 3600
2663
2663
2664 $ hg log -r 'sort(all(), -user)'
2664 $ hg log -r 'sort(all(), -user)'
2665 2 b111 m11 u12 111 3600
2665 2 b111 m11 u12 111 3600
2666 0 b12 m111 u112 111 10800
2666 0 b12 m111 u112 111 10800
2667 1 b11 m12 u111 112 7200
2667 1 b11 m12 u111 112 7200
2668 4 b111 m112 u111 110 14400
2668 4 b111 m112 u111 110 14400
2669 3 b112 m111 u11 120 0
2669 3 b112 m111 u11 120 0
2670
2670
2671 compare dates (tz offset should have no effect):
2671 compare dates (tz offset should have no effect):
2672
2672
2673 $ hg log -r 'sort(all(), date)'
2673 $ hg log -r 'sort(all(), date)'
2674 4 b111 m112 u111 110 14400
2674 4 b111 m112 u111 110 14400
2675 0 b12 m111 u112 111 10800
2675 0 b12 m111 u112 111 10800
2676 2 b111 m11 u12 111 3600
2676 2 b111 m11 u12 111 3600
2677 1 b11 m12 u111 112 7200
2677 1 b11 m12 u111 112 7200
2678 3 b112 m111 u11 120 0
2678 3 b112 m111 u11 120 0
2679
2679
2680 $ hg log -r 'sort(all(), -date)'
2680 $ hg log -r 'sort(all(), -date)'
2681 3 b112 m111 u11 120 0
2681 3 b112 m111 u11 120 0
2682 1 b11 m12 u111 112 7200
2682 1 b11 m12 u111 112 7200
2683 0 b12 m111 u112 111 10800
2683 0 b12 m111 u112 111 10800
2684 2 b111 m11 u12 111 3600
2684 2 b111 m11 u12 111 3600
2685 4 b111 m112 u111 110 14400
2685 4 b111 m112 u111 110 14400
2686
2686
2687 be aware that 'sort(x, -k)' is not exactly the same as 'reverse(sort(x, k))'
2687 be aware that 'sort(x, -k)' is not exactly the same as 'reverse(sort(x, k))'
2688 because '-k' reverses the comparison, not the list itself:
2688 because '-k' reverses the comparison, not the list itself:
2689
2689
2690 $ hg log -r 'sort(0 + 2, date)'
2690 $ hg log -r 'sort(0 + 2, date)'
2691 0 b12 m111 u112 111 10800
2691 0 b12 m111 u112 111 10800
2692 2 b111 m11 u12 111 3600
2692 2 b111 m11 u12 111 3600
2693
2693
2694 $ hg log -r 'sort(0 + 2, -date)'
2694 $ hg log -r 'sort(0 + 2, -date)'
2695 0 b12 m111 u112 111 10800
2695 0 b12 m111 u112 111 10800
2696 2 b111 m11 u12 111 3600
2696 2 b111 m11 u12 111 3600
2697
2697
2698 $ hg log -r 'reverse(sort(0 + 2, date))'
2698 $ hg log -r 'reverse(sort(0 + 2, date))'
2699 2 b111 m11 u12 111 3600
2699 2 b111 m11 u12 111 3600
2700 0 b12 m111 u112 111 10800
2700 0 b12 m111 u112 111 10800
2701
2701
2702 sort by multiple keys:
2702 sort by multiple keys:
2703
2703
2704 $ hg log -r 'sort(all(), "branch -rev")'
2704 $ hg log -r 'sort(all(), "branch -rev")'
2705 1 b11 m12 u111 112 7200
2705 1 b11 m12 u111 112 7200
2706 4 b111 m112 u111 110 14400
2706 4 b111 m112 u111 110 14400
2707 2 b111 m11 u12 111 3600
2707 2 b111 m11 u12 111 3600
2708 3 b112 m111 u11 120 0
2708 3 b112 m111 u11 120 0
2709 0 b12 m111 u112 111 10800
2709 0 b12 m111 u112 111 10800
2710
2710
2711 $ hg log -r 'sort(all(), "-desc -date")'
2711 $ hg log -r 'sort(all(), "-desc -date")'
2712 1 b11 m12 u111 112 7200
2712 1 b11 m12 u111 112 7200
2713 4 b111 m112 u111 110 14400
2713 4 b111 m112 u111 110 14400
2714 3 b112 m111 u11 120 0
2714 3 b112 m111 u11 120 0
2715 0 b12 m111 u112 111 10800
2715 0 b12 m111 u112 111 10800
2716 2 b111 m11 u12 111 3600
2716 2 b111 m11 u12 111 3600
2717
2717
2718 $ hg log -r 'sort(all(), "user -branch date rev")'
2718 $ hg log -r 'sort(all(), "user -branch date rev")'
2719 3 b112 m111 u11 120 0
2719 3 b112 m111 u11 120 0
2720 4 b111 m112 u111 110 14400
2720 4 b111 m112 u111 110 14400
2721 1 b11 m12 u111 112 7200
2721 1 b11 m12 u111 112 7200
2722 0 b12 m111 u112 111 10800
2722 0 b12 m111 u112 111 10800
2723 2 b111 m11 u12 111 3600
2723 2 b111 m11 u12 111 3600
2724
2724
2725 toposort prioritises graph branches
2725 toposort prioritises graph branches
2726
2726
2727 $ hg up 2
2727 $ hg up 2
2728 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
2728 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
2729 $ touch a
2729 $ touch a
2730 $ hg addremove
2730 $ hg addremove
2731 adding a
2731 adding a
2732 $ hg ci -m 't1' -u 'tu' -d '130 0'
2732 $ hg ci -m 't1' -u 'tu' -d '130 0'
2733 created new head
2733 created new head
2734 $ echo 'a' >> a
2734 $ echo 'a' >> a
2735 $ hg ci -m 't2' -u 'tu' -d '130 0'
2735 $ hg ci -m 't2' -u 'tu' -d '130 0'
2736 $ hg book book1
2736 $ hg book book1
2737 $ hg up 4
2737 $ hg up 4
2738 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
2738 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
2739 (leaving bookmark book1)
2739 (leaving bookmark book1)
2740 $ touch a
2740 $ touch a
2741 $ hg addremove
2741 $ hg addremove
2742 adding a
2742 adding a
2743 $ hg ci -m 't3' -u 'tu' -d '130 0'
2743 $ hg ci -m 't3' -u 'tu' -d '130 0'
2744
2744
2745 $ hg log -r 'sort(all(), topo)'
2745 $ hg log -r 'sort(all(), topo)'
2746 7 b111 t3 tu 130 0
2746 7 b111 t3 tu 130 0
2747 4 b111 m112 u111 110 14400
2747 4 b111 m112 u111 110 14400
2748 3 b112 m111 u11 120 0
2748 3 b112 m111 u11 120 0
2749 6 b111 t2 tu 130 0
2749 6 b111 t2 tu 130 0
2750 5 b111 t1 tu 130 0
2750 5 b111 t1 tu 130 0
2751 2 b111 m11 u12 111 3600
2751 2 b111 m11 u12 111 3600
2752 1 b11 m12 u111 112 7200
2752 1 b11 m12 u111 112 7200
2753 0 b12 m111 u112 111 10800
2753 0 b12 m111 u112 111 10800
2754
2754
2755 $ hg log -r 'sort(all(), -topo)'
2755 $ hg log -r 'sort(all(), -topo)'
2756 0 b12 m111 u112 111 10800
2756 0 b12 m111 u112 111 10800
2757 1 b11 m12 u111 112 7200
2757 1 b11 m12 u111 112 7200
2758 2 b111 m11 u12 111 3600
2758 2 b111 m11 u12 111 3600
2759 5 b111 t1 tu 130 0
2759 5 b111 t1 tu 130 0
2760 6 b111 t2 tu 130 0
2760 6 b111 t2 tu 130 0
2761 3 b112 m111 u11 120 0
2761 3 b112 m111 u11 120 0
2762 4 b111 m112 u111 110 14400
2762 4 b111 m112 u111 110 14400
2763 7 b111 t3 tu 130 0
2763 7 b111 t3 tu 130 0
2764
2764
2765 $ hg log -r 'sort(all(), topo, topo.firstbranch=book1)'
2765 $ hg log -r 'sort(all(), topo, topo.firstbranch=book1)'
2766 6 b111 t2 tu 130 0
2766 6 b111 t2 tu 130 0
2767 5 b111 t1 tu 130 0
2767 5 b111 t1 tu 130 0
2768 7 b111 t3 tu 130 0
2768 7 b111 t3 tu 130 0
2769 4 b111 m112 u111 110 14400
2769 4 b111 m112 u111 110 14400
2770 3 b112 m111 u11 120 0
2770 3 b112 m111 u11 120 0
2771 2 b111 m11 u12 111 3600
2771 2 b111 m11 u12 111 3600
2772 1 b11 m12 u111 112 7200
2772 1 b11 m12 u111 112 7200
2773 0 b12 m111 u112 111 10800
2773 0 b12 m111 u112 111 10800
2774
2774
2775 topographical sorting can't be combined with other sort keys, and you can't
2775 topographical sorting can't be combined with other sort keys, and you can't
2776 use the topo.firstbranch option when topo sort is not active:
2776 use the topo.firstbranch option when topo sort is not active:
2777
2777
2778 $ hg log -r 'sort(all(), "topo user")'
2778 $ hg log -r 'sort(all(), "topo user")'
2779 hg: parse error: topo sort order cannot be combined with other sort keys
2779 hg: parse error: topo sort order cannot be combined with other sort keys
2780 [255]
2780 [255]
2781
2781
2782 $ hg log -r 'sort(all(), user, topo.firstbranch=book1)'
2782 $ hg log -r 'sort(all(), user, topo.firstbranch=book1)'
2783 hg: parse error: topo.firstbranch can only be used when using the topo sort key
2783 hg: parse error: topo.firstbranch can only be used when using the topo sort key
2784 [255]
2784 [255]
2785
2785
2786 topo.firstbranch should accept any kind of expressions:
2786 topo.firstbranch should accept any kind of expressions:
2787
2787
2788 $ hg log -r 'sort(0, topo, topo.firstbranch=(book1))'
2788 $ hg log -r 'sort(0, topo, topo.firstbranch=(book1))'
2789 0 b12 m111 u112 111 10800
2789 0 b12 m111 u112 111 10800
2790
2790
2791 $ cd ..
2791 $ cd ..
2792 $ cd repo
2792 $ cd repo
2793
2793
2794 test multiline revset with errors
2794 test multiline revset with errors
2795
2795
2796 $ echo > multiline-revset
2796 $ echo > multiline-revset
2797 $ echo '. +' >> multiline-revset
2797 $ echo '. +' >> multiline-revset
2798 $ echo '.^ +' >> multiline-revset
2798 $ echo '.^ +' >> multiline-revset
2799 $ hg log -r "`cat multiline-revset`"
2799 $ hg log -r "`cat multiline-revset`"
2800 hg: parse error at 9: not a prefix: end
2800 hg: parse error at 9: not a prefix: end
2801 ( . + .^ +
2801 ( . + .^ +
2802 ^ here)
2802 ^ here)
2803 [255]
2803 [255]
General Comments 0
You need to be logged in to leave comments. Login now