dagop: extend filectxancestors() to walk multiple files
Yuya Nishihara - r35277:205c3c6c (default)
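This changeset lets filectxancestors() start from several file contexts at once: it now takes an iterable of fctxs instead of a single fctx and yields each ancestor context once, highest revision first. A minimal usage sketch, assuming a hypothetical repository path and file names (not part of this changeset):

    from mercurial import dagop, hg, ui as uimod

    repo = hg.repository(uimod.ui.load(), '/path/to/repo')  # hypothetical path
    ctx = repo['tip']
    fctxs = [ctx['a.txt'], ctx['b.txt']]                     # hypothetical files

    # filectxancestors() now accepts multiple starting filectxs and yields
    # each ancestor filectx once, ordered by descending revision.
    for fctx in dagop.filectxancestors(fctxs, followfirst=False):
        print('%s@%d' % (fctx.path(), fctx.rev()))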
dagop.py
@@ -1,541 +1,543 @@
1 # dagop.py - graph ancestry and topology algorithm for revset
1 # dagop.py - graph ancestry and topology algorithm for revset
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import heapq
10 import heapq
11
11
12 from . import (
12 from . import (
13 error,
13 error,
14 mdiff,
14 mdiff,
15 node,
15 node,
16 patch,
16 patch,
17 smartset,
17 smartset,
18 )
18 )
19
19
20 baseset = smartset.baseset
20 baseset = smartset.baseset
21 generatorset = smartset.generatorset
21 generatorset = smartset.generatorset
22
22
23 # possible maximum depth between null and wdir()
23 # possible maximum depth between null and wdir()
24 _maxlogdepth = 0x80000000
24 _maxlogdepth = 0x80000000
25
25
26 def _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse):
26 def _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse):
27 """Walk DAG using 'pfunc' from the given 'revs' nodes
27 """Walk DAG using 'pfunc' from the given 'revs' nodes
28
28
29 'pfunc(rev)' should return the parent/child revisions of the given 'rev'
29 'pfunc(rev)' should return the parent/child revisions of the given 'rev'
30 if 'reverse' is True/False respectively.
30 if 'reverse' is True/False respectively.
31
31
32 Scan ends at the stopdepth (exclusive) if specified. Revisions found
32 Scan ends at the stopdepth (exclusive) if specified. Revisions found
33 earlier than the startdepth are omitted.
33 earlier than the startdepth are omitted.
34 """
34 """
35 if startdepth is None:
35 if startdepth is None:
36 startdepth = 0
36 startdepth = 0
37 if stopdepth is None:
37 if stopdepth is None:
38 stopdepth = _maxlogdepth
38 stopdepth = _maxlogdepth
39 if stopdepth == 0:
39 if stopdepth == 0:
40 return
40 return
41 if stopdepth < 0:
41 if stopdepth < 0:
42 raise error.ProgrammingError('negative stopdepth')
42 raise error.ProgrammingError('negative stopdepth')
43 if reverse:
43 if reverse:
44 heapsign = -1 # max heap
44 heapsign = -1 # max heap
45 else:
45 else:
46 heapsign = +1 # min heap
46 heapsign = +1 # min heap
47
47
48 # load input revs lazily to heap so earlier revisions can be yielded
48 # load input revs lazily to heap so earlier revisions can be yielded
49 # without fully computing the input revs
49 # without fully computing the input revs
50 revs.sort(reverse)
50 revs.sort(reverse)
51 irevs = iter(revs)
51 irevs = iter(revs)
52 pendingheap = [] # [(heapsign * rev, depth), ...] (i.e. lower depth first)
52 pendingheap = [] # [(heapsign * rev, depth), ...] (i.e. lower depth first)
53
53
54 inputrev = next(irevs, None)
54 inputrev = next(irevs, None)
55 if inputrev is not None:
55 if inputrev is not None:
56 heapq.heappush(pendingheap, (heapsign * inputrev, 0))
56 heapq.heappush(pendingheap, (heapsign * inputrev, 0))
57
57
58 lastrev = None
58 lastrev = None
59 while pendingheap:
59 while pendingheap:
60 currev, curdepth = heapq.heappop(pendingheap)
60 currev, curdepth = heapq.heappop(pendingheap)
61 currev = heapsign * currev
61 currev = heapsign * currev
62 if currev == inputrev:
62 if currev == inputrev:
63 inputrev = next(irevs, None)
63 inputrev = next(irevs, None)
64 if inputrev is not None:
64 if inputrev is not None:
65 heapq.heappush(pendingheap, (heapsign * inputrev, 0))
65 heapq.heappush(pendingheap, (heapsign * inputrev, 0))
66 # rescan parents until curdepth >= startdepth because queued entries
66 # rescan parents until curdepth >= startdepth because queued entries
67 # of the same revision are iterated from the lowest depth
67 # of the same revision are iterated from the lowest depth
68 foundnew = (currev != lastrev)
68 foundnew = (currev != lastrev)
69 if foundnew and curdepth >= startdepth:
69 if foundnew and curdepth >= startdepth:
70 lastrev = currev
70 lastrev = currev
71 yield currev
71 yield currev
72 pdepth = curdepth + 1
72 pdepth = curdepth + 1
73 if foundnew and pdepth < stopdepth:
73 if foundnew and pdepth < stopdepth:
74 for prev in pfunc(currev):
74 for prev in pfunc(currev):
75 if prev != node.nullrev:
75 if prev != node.nullrev:
76 heapq.heappush(pendingheap, (heapsign * prev, pdepth))
76 heapq.heappush(pendingheap, (heapsign * prev, pdepth))
77
77
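# A minimal stdlib-only sketch of the heapsign trick _walkrevtree() uses above:
# pushing 'heapsign * rev' lets one heapq-based min heap act as either a min
# heap (heapsign = +1) or a max heap (heapsign = -1). Illustrative only.
import heapq

def _drainheap(revs, reverse):
    heapsign = -1 if reverse else +1
    heap = [heapsign * r for r in revs]
    heapq.heapify(heap)
    return [heapsign * heapq.heappop(heap) for _ in range(len(heap))]

assert _drainheap([3, 1, 2], reverse=False) == [1, 2, 3]  # min heap order
assert _drainheap([3, 1, 2], reverse=True) == [3, 2, 1]   # max heap order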
78 def filectxancestors(fctx, followfirst=False):
78 def filectxancestors(fctxs, followfirst=False):
79 """Like filectx.ancestors(), but includes the given fctx itself"""
79 """Like filectx.ancestors(), but can walk from multiple files/revisions,
80 and includes the given fctxs themselves"""
80 visit = {}
81 visit = {}
81 def addvisit(fctx):
82 def addvisit(fctx):
82 rev = fctx.rev()
83 rev = fctx.rev()
83 if rev not in visit:
84 if rev not in visit:
84 visit[rev] = set()
85 visit[rev] = set()
85 visit[rev].add(fctx)
86 visit[rev].add(fctx)
86
87
87 if followfirst:
88 if followfirst:
88 cut = 1
89 cut = 1
89 else:
90 else:
90 cut = None
91 cut = None
91
92
92 addvisit(fctx)
93 for c in fctxs:
94 addvisit(c)
93 while visit:
95 while visit:
94 rev = max(visit)
96 rev = max(visit)
95 c = visit[rev].pop()
97 c = visit[rev].pop()
96 if not visit[rev]:
98 if not visit[rev]:
97 del visit[rev]
99 del visit[rev]
98 yield c
100 yield c
99 for parent in c.parents()[:cut]:
101 for parent in c.parents()[:cut]:
100 addvisit(parent)
102 addvisit(parent)
101
103
102 def _genrevancestors(repo, revs, followfirst, startdepth, stopdepth, cutfunc):
104 def _genrevancestors(repo, revs, followfirst, startdepth, stopdepth, cutfunc):
103 if followfirst:
105 if followfirst:
104 cut = 1
106 cut = 1
105 else:
107 else:
106 cut = None
108 cut = None
107 cl = repo.changelog
109 cl = repo.changelog
108 def plainpfunc(rev):
110 def plainpfunc(rev):
109 try:
111 try:
110 return cl.parentrevs(rev)[:cut]
112 return cl.parentrevs(rev)[:cut]
111 except error.WdirUnsupported:
113 except error.WdirUnsupported:
112 return (pctx.rev() for pctx in repo[rev].parents()[:cut])
114 return (pctx.rev() for pctx in repo[rev].parents()[:cut])
113 if cutfunc is None:
115 if cutfunc is None:
114 pfunc = plainpfunc
116 pfunc = plainpfunc
115 else:
117 else:
116 pfunc = lambda rev: [r for r in plainpfunc(rev) if not cutfunc(r)]
118 pfunc = lambda rev: [r for r in plainpfunc(rev) if not cutfunc(r)]
117 revs = revs.filter(lambda rev: not cutfunc(rev))
119 revs = revs.filter(lambda rev: not cutfunc(rev))
118 return _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse=True)
120 return _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse=True)
119
121
120 def revancestors(repo, revs, followfirst=False, startdepth=None,
122 def revancestors(repo, revs, followfirst=False, startdepth=None,
121 stopdepth=None, cutfunc=None):
123 stopdepth=None, cutfunc=None):
122 """Like revlog.ancestors(), but supports additional options, includes
124 """Like revlog.ancestors(), but supports additional options, includes
123 the given revs themselves, and returns a smartset
125 the given revs themselves, and returns a smartset
124
126
125 Scan ends at the stopdepth (exclusive) if specified. Revisions found
127 Scan ends at the stopdepth (exclusive) if specified. Revisions found
126 earlier than the startdepth are omitted.
128 earlier than the startdepth are omitted.
127
129
128 If cutfunc is provided, it will be used to cut the traversal of the DAG.
130 If cutfunc is provided, it will be used to cut the traversal of the DAG.
129 When cutfunc(X) returns True, the DAG traversal stops - revision X and
131 When cutfunc(X) returns True, the DAG traversal stops - revision X and
130 X's ancestors in the traversal path will be skipped. This could be an
132 X's ancestors in the traversal path will be skipped. This could be an
131 optimization sometimes.
133 optimization sometimes.
132
134
133 Note: if Y is an ancestor of X, cutfunc(X) returning True does not
135 Note: if Y is an ancestor of X, cutfunc(X) returning True does not
134 necessarily mean Y will also be cut. Usually cutfunc(Y) also wants to
136 necessarily mean Y will also be cut. Usually cutfunc(Y) also wants to
135 return True in this case. For example,
137 return True in this case. For example,
136
138
137 D # revancestors(repo, D, cutfunc=lambda rev: rev == B)
139 D # revancestors(repo, D, cutfunc=lambda rev: rev == B)
138 |\ # will include "A", because the path D -> C -> A was not cut.
140 |\ # will include "A", because the path D -> C -> A was not cut.
139 B C # If "B" gets cut, "A" might want to be cut too.
141 B C # If "B" gets cut, "A" might want to be cut too.
140 |/
142 |/
141 A
143 A
142 """
144 """
143 gen = _genrevancestors(repo, revs, followfirst, startdepth, stopdepth,
145 gen = _genrevancestors(repo, revs, followfirst, startdepth, stopdepth,
144 cutfunc)
146 cutfunc)
145 return generatorset(gen, iterasc=False)
147 return generatorset(gen, iterasc=False)
146
148
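# A minimal sketch of the cutfunc behaviour documented above, using a tiny
# stand-in repository (the _fakecl/_fakerepo classes here are illustrative
# only, not Mercurial API): revs 0=A, 1=B, 2=C and 3=D, where D merges B and C.
class _fakecl(object):
    def __init__(self, parents):
        self._parents = parents
    def parentrevs(self, rev):
        # rev -> (p1, p2), with -1 standing for nullrev
        return self._parents[rev]

class _fakerepo(object):
    def __init__(self, parents):
        self.changelog = _fakecl(parents)

_mergerepo = _fakerepo({0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (1, 2)})
# Cutting B (rev 1) still reaches A (rev 0) through the D -> C -> A path:
_cut = revancestors(_mergerepo, baseset([3]), cutfunc=lambda rev: rev == 1)
assert list(_cut) == [3, 2, 0]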
147 def _genrevdescendants(repo, revs, followfirst):
149 def _genrevdescendants(repo, revs, followfirst):
148 if followfirst:
150 if followfirst:
149 cut = 1
151 cut = 1
150 else:
152 else:
151 cut = None
153 cut = None
152
154
153 cl = repo.changelog
155 cl = repo.changelog
154 first = revs.min()
156 first = revs.min()
155 nullrev = node.nullrev
157 nullrev = node.nullrev
156 if first == nullrev:
158 if first == nullrev:
157 # Are there nodes with a null first parent and a non-null
159 # Are there nodes with a null first parent and a non-null
158 # second one? Maybe. Do we care? Probably not.
160 # second one? Maybe. Do we care? Probably not.
159 yield first
161 yield first
160 for i in cl:
162 for i in cl:
161 yield i
163 yield i
162 else:
164 else:
163 seen = set(revs)
165 seen = set(revs)
164 for i in cl.revs(first):
166 for i in cl.revs(first):
165 if i in seen:
167 if i in seen:
166 yield i
168 yield i
167 continue
169 continue
168 for x in cl.parentrevs(i)[:cut]:
170 for x in cl.parentrevs(i)[:cut]:
169 if x != nullrev and x in seen:
171 if x != nullrev and x in seen:
170 seen.add(i)
172 seen.add(i)
171 yield i
173 yield i
172 break
174 break
173
175
174 def _builddescendantsmap(repo, startrev, followfirst):
176 def _builddescendantsmap(repo, startrev, followfirst):
175 """Build map of 'rev -> child revs', offset from startrev"""
177 """Build map of 'rev -> child revs', offset from startrev"""
176 cl = repo.changelog
178 cl = repo.changelog
177 nullrev = node.nullrev
179 nullrev = node.nullrev
178 descmap = [[] for _rev in xrange(startrev, len(cl))]
180 descmap = [[] for _rev in xrange(startrev, len(cl))]
179 for currev in cl.revs(startrev + 1):
181 for currev in cl.revs(startrev + 1):
180 p1rev, p2rev = cl.parentrevs(currev)
182 p1rev, p2rev = cl.parentrevs(currev)
181 if p1rev >= startrev:
183 if p1rev >= startrev:
182 descmap[p1rev - startrev].append(currev)
184 descmap[p1rev - startrev].append(currev)
183 if not followfirst and p2rev != nullrev and p2rev >= startrev:
185 if not followfirst and p2rev != nullrev and p2rev >= startrev:
184 descmap[p2rev - startrev].append(currev)
186 descmap[p2rev - startrev].append(currev)
185 return descmap
187 return descmap
186
188
187 def _genrevdescendantsofdepth(repo, revs, followfirst, startdepth, stopdepth):
189 def _genrevdescendantsofdepth(repo, revs, followfirst, startdepth, stopdepth):
188 startrev = revs.min()
190 startrev = revs.min()
189 descmap = _builddescendantsmap(repo, startrev, followfirst)
191 descmap = _builddescendantsmap(repo, startrev, followfirst)
190 def pfunc(rev):
192 def pfunc(rev):
191 return descmap[rev - startrev]
193 return descmap[rev - startrev]
192 return _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse=False)
194 return _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse=False)
193
195
194 def revdescendants(repo, revs, followfirst, startdepth=None, stopdepth=None):
196 def revdescendants(repo, revs, followfirst, startdepth=None, stopdepth=None):
195 """Like revlog.descendants() but supports additional options, includes
197 """Like revlog.descendants() but supports additional options, includes
196 the given revs themselves, and returns a smartset
198 the given revs themselves, and returns a smartset
197
199
198 Scan ends at the stopdepth (exclusive) if specified. Revisions found
200 Scan ends at the stopdepth (exclusive) if specified. Revisions found
199 earlier than the startdepth are omitted.
201 earlier than the startdepth are omitted.
200 """
202 """
201 if startdepth is None and stopdepth is None:
203 if startdepth is None and stopdepth is None:
202 gen = _genrevdescendants(repo, revs, followfirst)
204 gen = _genrevdescendants(repo, revs, followfirst)
203 else:
205 else:
204 gen = _genrevdescendantsofdepth(repo, revs, followfirst,
206 gen = _genrevdescendantsofdepth(repo, revs, followfirst,
205 startdepth, stopdepth)
207 startdepth, stopdepth)
206 return generatorset(gen, iterasc=True)
208 return generatorset(gen, iterasc=True)
207
209
208 def _reachablerootspure(repo, minroot, roots, heads, includepath):
210 def _reachablerootspure(repo, minroot, roots, heads, includepath):
209 """return (heads(::<roots> and ::<heads>))
211 """return (heads(::<roots> and ::<heads>))
210
212
211 If includepath is True, return (<roots>::<heads>)."""
213 If includepath is True, return (<roots>::<heads>)."""
212 if not roots:
214 if not roots:
213 return []
215 return []
214 parentrevs = repo.changelog.parentrevs
216 parentrevs = repo.changelog.parentrevs
215 roots = set(roots)
217 roots = set(roots)
216 visit = list(heads)
218 visit = list(heads)
217 reachable = set()
219 reachable = set()
218 seen = {}
220 seen = {}
219 # prefetch all the things! (because python is slow)
221 # prefetch all the things! (because python is slow)
220 reached = reachable.add
222 reached = reachable.add
221 dovisit = visit.append
223 dovisit = visit.append
222 nextvisit = visit.pop
224 nextvisit = visit.pop
223 # open-code the post-order traversal due to the tiny size of
225 # open-code the post-order traversal due to the tiny size of
224 # sys.getrecursionlimit()
226 # sys.getrecursionlimit()
225 while visit:
227 while visit:
226 rev = nextvisit()
228 rev = nextvisit()
227 if rev in roots:
229 if rev in roots:
228 reached(rev)
230 reached(rev)
229 if not includepath:
231 if not includepath:
230 continue
232 continue
231 parents = parentrevs(rev)
233 parents = parentrevs(rev)
232 seen[rev] = parents
234 seen[rev] = parents
233 for parent in parents:
235 for parent in parents:
234 if parent >= minroot and parent not in seen:
236 if parent >= minroot and parent not in seen:
235 dovisit(parent)
237 dovisit(parent)
236 if not reachable:
238 if not reachable:
237 return baseset()
239 return baseset()
238 if not includepath:
240 if not includepath:
239 return reachable
241 return reachable
240 for rev in sorted(seen):
242 for rev in sorted(seen):
241 for parent in seen[rev]:
243 for parent in seen[rev]:
242 if parent in reachable:
244 if parent in reachable:
243 reached(rev)
245 reached(rev)
244 return reachable
246 return reachable
245
247
246 def reachableroots(repo, roots, heads, includepath=False):
248 def reachableroots(repo, roots, heads, includepath=False):
247 """return (heads(::<roots> and ::<heads>))
249 """return (heads(::<roots> and ::<heads>))
248
250
249 If includepath is True, return (<roots>::<heads>)."""
251 If includepath is True, return (<roots>::<heads>)."""
250 if not roots:
252 if not roots:
251 return baseset()
253 return baseset()
252 minroot = roots.min()
254 minroot = roots.min()
253 roots = list(roots)
255 roots = list(roots)
254 heads = list(heads)
256 heads = list(heads)
255 try:
257 try:
256 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
258 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
257 except AttributeError:
259 except AttributeError:
258 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
260 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
259 revs = baseset(revs)
261 revs = baseset(revs)
260 revs.sort()
262 revs.sort()
261 return revs
263 return revs
262
264
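# Another small sketch, reusing the illustrative _fakerepo stand-in from the
# revancestors() example above: on the linear graph 0 <- 1 <- 2 <- 3 plus a
# side branch 0 <- 4, heads(::{1} and ::{3}) is {1}, while the includepath
# variant yields the whole 1::3 range.
_linearrepo = _fakerepo({0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (2, -1),
                         4: (0, -1)})
assert _reachablerootspure(_linearrepo, 1, [1], [3], includepath=False) == {1}
assert _reachablerootspure(_linearrepo, 1, [1], [3],
                           includepath=True) == {1, 2, 3}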
263 def _changesrange(fctx1, fctx2, linerange2, diffopts):
265 def _changesrange(fctx1, fctx2, linerange2, diffopts):
264 """Return `(diffinrange, linerange1)` where `diffinrange` is True
266 """Return `(diffinrange, linerange1)` where `diffinrange` is True
265 if diff from fctx2 to fctx1 has changes in linerange2 and
267 if diff from fctx2 to fctx1 has changes in linerange2 and
266 `linerange1` is the new line range for fctx1.
268 `linerange1` is the new line range for fctx1.
267 """
269 """
268 blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
270 blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
269 filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2)
271 filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2)
270 diffinrange = any(stype == '!' for _, stype in filteredblocks)
272 diffinrange = any(stype == '!' for _, stype in filteredblocks)
271 return diffinrange, linerange1
273 return diffinrange, linerange1
272
274
273 def blockancestors(fctx, fromline, toline, followfirst=False):
275 def blockancestors(fctx, fromline, toline, followfirst=False):
274 """Yield ancestors of `fctx` with respect to the block of lines within
276 """Yield ancestors of `fctx` with respect to the block of lines within
275 `fromline`-`toline` range.
277 `fromline`-`toline` range.
276 """
278 """
277 diffopts = patch.diffopts(fctx._repo.ui)
279 diffopts = patch.diffopts(fctx._repo.ui)
278 fctx = fctx.introfilectx()
280 fctx = fctx.introfilectx()
279 visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
281 visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
280 while visit:
282 while visit:
281 c, linerange2 = visit.pop(max(visit))
283 c, linerange2 = visit.pop(max(visit))
282 pl = c.parents()
284 pl = c.parents()
283 if followfirst:
285 if followfirst:
284 pl = pl[:1]
286 pl = pl[:1]
285 if not pl:
287 if not pl:
286 # The block originates from the initial revision.
288 # The block originates from the initial revision.
287 yield c, linerange2
289 yield c, linerange2
288 continue
290 continue
289 inrange = False
291 inrange = False
290 for p in pl:
292 for p in pl:
291 inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
293 inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
292 inrange = inrange or inrangep
294 inrange = inrange or inrangep
293 if linerange1[0] == linerange1[1]:
295 if linerange1[0] == linerange1[1]:
294 # Parent's linerange is empty, meaning that the block got
296 # Parent's linerange is empty, meaning that the block got
295 # introduced in this revision; no need to go further in this
297 # introduced in this revision; no need to go further in this
296 # branch.
298 # branch.
297 continue
299 continue
298 # Set _descendantrev with 'c' (a known descendant) so that, when
300 # Set _descendantrev with 'c' (a known descendant) so that, when
299 # _adjustlinkrev is called for 'p', it receives this descendant
301 # _adjustlinkrev is called for 'p', it receives this descendant
300 # (as srcrev) instead of the possibly-topmost introrev.
302 # (as srcrev) instead of the possibly-topmost introrev.
301 p._descendantrev = c.rev()
303 p._descendantrev = c.rev()
302 visit[p.linkrev(), p.filenode()] = p, linerange1
304 visit[p.linkrev(), p.filenode()] = p, linerange1
303 if inrange:
305 if inrange:
304 yield c, linerange2
306 yield c, linerange2
305
307
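# A minimal usage sketch for the line-range walk above; the repository path
# and file name are hypothetical assumptions, not part of this changeset.
from mercurial import hg as _hg, ui as _uimod

_blkrepo = _hg.repository(_uimod.ui.load(), '/path/to/repo')  # hypothetical
_blkfctx = _blkrepo['tip']['foo.py']                          # hypothetical file
# Walk the ancestors that touched a block of lines of that file:
for _c, _linerange in blockancestors(_blkfctx, 10, 20):
    print('%d: lines %d-%d' % (_c.rev(), _linerange[0], _linerange[1]))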
306 def blockdescendants(fctx, fromline, toline):
308 def blockdescendants(fctx, fromline, toline):
307 """Yield descendants of `fctx` with respect to the block of lines within
309 """Yield descendants of `fctx` with respect to the block of lines within
308 `fromline`-`toline` range.
310 `fromline`-`toline` range.
309 """
311 """
310 # First possibly yield 'fctx' if it has changes in range with respect to
312 # First possibly yield 'fctx' if it has changes in range with respect to
311 # its parents.
313 # its parents.
312 try:
314 try:
313 c, linerange1 = next(blockancestors(fctx, fromline, toline))
315 c, linerange1 = next(blockancestors(fctx, fromline, toline))
314 except StopIteration:
316 except StopIteration:
315 pass
317 pass
316 else:
318 else:
317 if c == fctx:
319 if c == fctx:
318 yield c, linerange1
320 yield c, linerange1
319
321
320 diffopts = patch.diffopts(fctx._repo.ui)
322 diffopts = patch.diffopts(fctx._repo.ui)
321 fl = fctx.filelog()
323 fl = fctx.filelog()
322 seen = {fctx.filerev(): (fctx, (fromline, toline))}
324 seen = {fctx.filerev(): (fctx, (fromline, toline))}
323 for i in fl.descendants([fctx.filerev()]):
325 for i in fl.descendants([fctx.filerev()]):
324 c = fctx.filectx(i)
326 c = fctx.filectx(i)
325 inrange = False
327 inrange = False
326 for x in fl.parentrevs(i):
328 for x in fl.parentrevs(i):
327 try:
329 try:
328 p, linerange2 = seen[x]
330 p, linerange2 = seen[x]
329 except KeyError:
331 except KeyError:
330 # nullrev or other branch
332 # nullrev or other branch
331 continue
333 continue
332 inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts)
334 inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts)
333 inrange = inrange or inrangep
335 inrange = inrange or inrangep
334 # If revision 'i' has been seen (it's a merge) and the line range
336 # If revision 'i' has been seen (it's a merge) and the line range
335 # previously computed differs from the one we just got, we take the
337 # previously computed differs from the one we just got, we take the
336 # surrounding interval. This is conservative but avoids losing
338 # surrounding interval. This is conservative but avoids losing
337 # information.
339 # information.
338 if i in seen and seen[i][1] != linerange1:
340 if i in seen and seen[i][1] != linerange1:
339 lbs, ubs = zip(linerange1, seen[i][1])
341 lbs, ubs = zip(linerange1, seen[i][1])
340 linerange1 = min(lbs), max(ubs)
342 linerange1 = min(lbs), max(ubs)
341 seen[i] = c, linerange1
343 seen[i] = c, linerange1
342 if inrange:
344 if inrange:
343 yield c, linerange1
345 yield c, linerange1
344
346
345 def toposort(revs, parentsfunc, firstbranch=()):
347 def toposort(revs, parentsfunc, firstbranch=()):
346 """Yield revisions from heads to roots one (topo) branch at a time.
348 """Yield revisions from heads to roots one (topo) branch at a time.
347
349
348 This function aims to be used by a graph generator that wishes to minimize
350 This function aims to be used by a graph generator that wishes to minimize
349 the number of parallel branches and their interleaving.
351 the number of parallel branches and their interleaving.
350
352
351 Example iteration order (numbers show the "true" order in a changelog):
353 Example iteration order (numbers show the "true" order in a changelog):
352
354
353 o 4
355 o 4
354 |
356 |
355 o 1
357 o 1
356 |
358 |
357 | o 3
359 | o 3
358 | |
360 | |
359 | o 2
361 | o 2
360 |/
362 |/
361 o 0
363 o 0
362
364
363 Note that the ancestors of merges are understood by the current
365 Note that the ancestors of merges are understood by the current
364 algorithm to be on the same branch. This means no reordering will
366 algorithm to be on the same branch. This means no reordering will
365 occur behind a merge.
367 occur behind a merge.
366 """
368 """
367
369
368 ### Quick summary of the algorithm
370 ### Quick summary of the algorithm
369 #
371 #
370 # This function is based around a "retention" principle. We keep revisions
372 # This function is based around a "retention" principle. We keep revisions
371 # in memory until we are ready to emit a whole branch that immediately
373 # in memory until we are ready to emit a whole branch that immediately
372 # "merges" into an existing one. This reduces the number of parallel
374 # "merges" into an existing one. This reduces the number of parallel
373 # branches with interleaved revisions.
375 # branches with interleaved revisions.
374 #
376 #
375 # During iteration revs are split into two groups:
377 # During iteration revs are split into two groups:
376 # A) revisions already emitted
378 # A) revisions already emitted
377 # B) revisions in "retention". They are stored as different subgroups.
379 # B) revisions in "retention". They are stored as different subgroups.
378 #
380 #
379 # for each REV, we do the following logic:
381 # for each REV, we do the following logic:
380 #
382 #
381 # 1) if REV is a parent of (A), we will emit it. If there is a
383 # 1) if REV is a parent of (A), we will emit it. If there is a
382 # retention group ((B) above) that is blocked on REV being
384 # retention group ((B) above) that is blocked on REV being
383 # available, we emit all the revisions out of that retention
385 # available, we emit all the revisions out of that retention
384 # group first.
386 # group first.
385 #
387 #
386 # 2) else, we search for a subgroup in (B) waiting for REV to become
388 # 2) else, we search for a subgroup in (B) waiting for REV to become
387 # available; if such a subgroup exists, we add REV to it and the subgroup
389 # available; if such a subgroup exists, we add REV to it and the subgroup
388 # now waits for REV.parents() to become available.
390 # now waits for REV.parents() to become available.
389 #
391 #
390 # 3) finally if no such group existed in (B), we create a new subgroup.
392 # 3) finally if no such group existed in (B), we create a new subgroup.
391 #
393 #
392 #
394 #
393 # To bootstrap the algorithm, we emit the tipmost revision (which
395 # To bootstrap the algorithm, we emit the tipmost revision (which
394 # puts it in group (A) from above).
396 # puts it in group (A) from above).
395
397
396 revs.sort(reverse=True)
398 revs.sort(reverse=True)
397
399
398 # Set of parents of revisions that have been emitted. They can be considered
400 # Set of parents of revisions that have been emitted. They can be considered
399 # unblocked as the graph generator is already aware of them so there is no
401 # unblocked as the graph generator is already aware of them so there is no
400 # need to delay the revisions that reference them.
402 # need to delay the revisions that reference them.
401 #
403 #
402 # If someone wants to prioritize a branch over the others, pre-filling this
404 # If someone wants to prioritize a branch over the others, pre-filling this
403 # set will force all other branches to wait until this branch is ready to be
405 # set will force all other branches to wait until this branch is ready to be
404 # emitted.
406 # emitted.
405 unblocked = set(firstbranch)
407 unblocked = set(firstbranch)
406
408
407 # list of groups waiting to be displayed, each group is defined by:
409 # list of groups waiting to be displayed, each group is defined by:
408 #
410 #
409 # (revs: list of revs waiting to be displayed,
411 # (revs: list of revs waiting to be displayed,
410 # blocked: set of revs that cannot be displayed before those in 'revs')
412 # blocked: set of revs that cannot be displayed before those in 'revs')
411 #
413 #
412 # The second value ('blocked') corresponds to parents of any revision in the
414 # The second value ('blocked') corresponds to parents of any revision in the
413 # group ('revs') that is not itself contained in the group. The main idea
415 # group ('revs') that is not itself contained in the group. The main idea
414 # of this algorithm is to delay as much as possible the emission of any
416 # of this algorithm is to delay as much as possible the emission of any
415 # revision. This means waiting for the moment we are about to display
417 # revision. This means waiting for the moment we are about to display
416 # these parents to display the revs in a group.
418 # these parents to display the revs in a group.
417 #
419 #
418 # This first implementation is smart until it encounters a merge: it will
420 # This first implementation is smart until it encounters a merge: it will
419 # emit revs as soon as any parent is about to be emitted and can grow an
421 # emit revs as soon as any parent is about to be emitted and can grow an
420 # arbitrary number of revs in 'blocked'. In practice this means we properly
422 # arbitrary number of revs in 'blocked'. In practice this means we properly
421 # retain new branches but give up on any special ordering for ancestors
423 # retain new branches but give up on any special ordering for ancestors
422 # of merges. The implementation can be improved to handle this better.
424 # of merges. The implementation can be improved to handle this better.
423 #
425 #
424 # The first subgroup is special. It corresponds to all the revisions that
426 # The first subgroup is special. It corresponds to all the revisions that
425 # were already emitted. The 'revs' list is expected to be empty and the
427 # were already emitted. The 'revs' list is expected to be empty and the
426 # 'blocked' set contains the parent revisions of already emitted revisions.
428 # 'blocked' set contains the parent revisions of already emitted revisions.
427 #
429 #
428 # You could pre-seed the <parents> set of groups[0] with specific
430 # You could pre-seed the <parents> set of groups[0] with specific
429 # changesets to select what the first emitted branch should be.
431 # changesets to select what the first emitted branch should be.
430 groups = [([], unblocked)]
432 groups = [([], unblocked)]
431 pendingheap = []
433 pendingheap = []
432 pendingset = set()
434 pendingset = set()
433
435
434 heapq.heapify(pendingheap)
436 heapq.heapify(pendingheap)
435 heappop = heapq.heappop
437 heappop = heapq.heappop
436 heappush = heapq.heappush
438 heappush = heapq.heappush
437 for currentrev in revs:
439 for currentrev in revs:
438 # Heap works with the smallest element; we want the highest, so we invert
440 # Heap works with the smallest element; we want the highest, so we invert
439 if currentrev not in pendingset:
441 if currentrev not in pendingset:
440 heappush(pendingheap, -currentrev)
442 heappush(pendingheap, -currentrev)
441 pendingset.add(currentrev)
443 pendingset.add(currentrev)
442 # iterate on pending revs until after the current rev has been
444 # iterate on pending revs until after the current rev has been
443 # processed.
445 # processed.
444 rev = None
446 rev = None
445 while rev != currentrev:
447 while rev != currentrev:
446 rev = -heappop(pendingheap)
448 rev = -heappop(pendingheap)
447 pendingset.remove(rev)
449 pendingset.remove(rev)
448
450
449 # Look for a subgroup that is blocked waiting for the current revision.
451 # Look for a subgroup that is blocked waiting for the current revision.
450 matching = [i for i, g in enumerate(groups) if rev in g[1]]
452 matching = [i for i, g in enumerate(groups) if rev in g[1]]
451
453
452 if matching:
454 if matching:
453 # The main idea is to gather together all sets that are blocked
455 # The main idea is to gather together all sets that are blocked
454 # on the same revision.
456 # on the same revision.
455 #
457 #
456 # Groups are merged when a common blocking ancestor is
458 # Groups are merged when a common blocking ancestor is
457 # observed. For example, given two groups:
459 # observed. For example, given two groups:
458 #
460 #
459 # revs [5, 4] waiting for 1
461 # revs [5, 4] waiting for 1
460 # revs [3, 2] waiting for 1
462 # revs [3, 2] waiting for 1
461 #
463 #
462 # These two groups will be merged when we process
464 # These two groups will be merged when we process
463 # 1. In theory, we could have merged the groups when
465 # 1. In theory, we could have merged the groups when
464 # we added 2 to the group it is now in (we could have
466 # we added 2 to the group it is now in (we could have
465 # noticed the groups were both blocked on 1 then), but
467 # noticed the groups were both blocked on 1 then), but
466 # the way it works now makes the algorithm simpler.
468 # the way it works now makes the algorithm simpler.
467 #
469 #
468 # We also always keep the oldest subgroup first. We can
470 # We also always keep the oldest subgroup first. We can
469 # probably improve the behavior by having the longest set
471 # probably improve the behavior by having the longest set
470 # first. That way, graph algorithms could minimise the length
472 # first. That way, graph algorithms could minimise the length
471 # of parallel lines in their drawing. This is currently not done.
473 # of parallel lines in their drawing. This is currently not done.
472 targetidx = matching.pop(0)
474 targetidx = matching.pop(0)
473 trevs, tparents = groups[targetidx]
475 trevs, tparents = groups[targetidx]
474 for i in matching:
476 for i in matching:
475 gr = groups[i]
477 gr = groups[i]
476 trevs.extend(gr[0])
478 trevs.extend(gr[0])
477 tparents |= gr[1]
479 tparents |= gr[1]
478 # delete all merged subgroups (except the one we kept)
480 # delete all merged subgroups (except the one we kept)
479 # (starting from the last subgroup for performance and
481 # (starting from the last subgroup for performance and
480 # sanity reasons)
482 # sanity reasons)
481 for i in reversed(matching):
483 for i in reversed(matching):
482 del groups[i]
484 del groups[i]
483 else:
485 else:
484 # This is a new head. We create a new subgroup for it.
486 # This is a new head. We create a new subgroup for it.
485 targetidx = len(groups)
487 targetidx = len(groups)
486 groups.append(([], {rev}))
488 groups.append(([], {rev}))
487
489
488 gr = groups[targetidx]
490 gr = groups[targetidx]
489
491
490 # We now add the current nodes to this subgroup. This is done
492 # We now add the current nodes to this subgroup. This is done
491 # after the subgroup merging because all elements from a subgroup
493 # after the subgroup merging because all elements from a subgroup
492 # that relied on this rev must precede it.
494 # that relied on this rev must precede it.
493 #
495 #
494 # we also update the <parents> set to include the parents of the
496 # we also update the <parents> set to include the parents of the
495 # new nodes.
497 # new nodes.
496 if rev == currentrev: # only display stuff in rev
498 if rev == currentrev: # only display stuff in rev
497 gr[0].append(rev)
499 gr[0].append(rev)
498 gr[1].remove(rev)
500 gr[1].remove(rev)
499 parents = [p for p in parentsfunc(rev) if p > node.nullrev]
501 parents = [p for p in parentsfunc(rev) if p > node.nullrev]
500 gr[1].update(parents)
502 gr[1].update(parents)
501 for p in parents:
503 for p in parents:
502 if p not in pendingset:
504 if p not in pendingset:
503 pendingset.add(p)
505 pendingset.add(p)
504 heappush(pendingheap, -p)
506 heappush(pendingheap, -p)
505
507
506 # Look for a subgroup to display
508 # Look for a subgroup to display
507 #
509 #
508 # When unblocked is empty (if clause), we were not waiting for any
510 # When unblocked is empty (if clause), we were not waiting for any
509 # revisions during the first iteration (if no priority was given) or
511 # revisions during the first iteration (if no priority was given) or
510 # if we emitted a whole disconnected set of the graph (reached a
512 # if we emitted a whole disconnected set of the graph (reached a
511 # root). In that case we arbitrarily take the oldest known
513 # root). In that case we arbitrarily take the oldest known
512 # subgroup. The heuristic could probably be better.
514 # subgroup. The heuristic could probably be better.
513 #
515 #
514 # Otherwise (elif clause) if the subgroup is blocked on
516 # Otherwise (elif clause) if the subgroup is blocked on
515 # a revision we just emitted, we can safely emit it as
517 # a revision we just emitted, we can safely emit it as
516 # well.
518 # well.
517 if not unblocked:
519 if not unblocked:
518 if len(groups) > 1: # display other subset
520 if len(groups) > 1: # display other subset
519 targetidx = 1
521 targetidx = 1
520 gr = groups[1]
522 gr = groups[1]
521 elif not gr[1] & unblocked:
523 elif not gr[1] & unblocked:
522 gr = None
524 gr = None
523
525
524 if gr is not None:
526 if gr is not None:
525 # update the set of awaited revisions with the one from the
527 # update the set of awaited revisions with the one from the
526 # subgroup
528 # subgroup
527 unblocked |= gr[1]
529 unblocked |= gr[1]
528 # output all revisions in the subgroup
530 # output all revisions in the subgroup
529 for r in gr[0]:
531 for r in gr[0]:
530 yield r
532 yield r
531 # delete the subgroup that you just output
533 # delete the subgroup that you just output
532 # unless it is groups[0] in which case you just empty it.
534 # unless it is groups[0] in which case you just empty it.
533 if targetidx:
535 if targetidx:
534 del groups[targetidx]
536 del groups[targetidx]
535 else:
537 else:
536 gr[0][:] = []
538 gr[0][:] = []
537 # Check if we have some subgroup waiting for revisions we are not going to
539 # Check if we have some subgroup waiting for revisions we are not going to
538 # iterate over
540 # iterate over
539 for g in groups:
541 for g in groups:
540 for r in g[0]:
542 for r in g[0]:
541 yield r
543 yield r
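toposort() above needs only a parent-lookup callable, so its behaviour is easy to check on the small graph drawn in its docstring (4 on top of 1 on top of 0, and 3 on top of 2 on top of 0). A minimal sketch, assuming only that the mercurial package is importable:

    from mercurial import dagop

    # parent revisions for the docstring's example graph
    parents = {0: [], 1: [0], 2: [0], 3: [2], 4: [1]}
    revs = [0, 1, 2, 3, 4]

    # one topological branch at a time, heads first: 4, 1, then 3, 2, then 0
    assert list(dagop.toposort(revs, lambda rev: parents[rev])) == [4, 1, 3, 2, 0]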
revset.py
@@ -1,2224 +1,2222 @@
1 # revset.py - revision set queries for mercurial
1 # revset.py - revision set queries for mercurial
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import re
10 import re
11
11
12 from .i18n import _
12 from .i18n import _
13 from . import (
13 from . import (
14 dagop,
14 dagop,
15 destutil,
15 destutil,
16 encoding,
16 encoding,
17 error,
17 error,
18 hbisect,
18 hbisect,
19 match as matchmod,
19 match as matchmod,
20 node,
20 node,
21 obsolete as obsmod,
21 obsolete as obsmod,
22 obsutil,
22 obsutil,
23 pathutil,
23 pathutil,
24 phases,
24 phases,
25 registrar,
25 registrar,
26 repoview,
26 repoview,
27 revsetlang,
27 revsetlang,
28 scmutil,
28 scmutil,
29 smartset,
29 smartset,
30 util,
30 util,
31 )
31 )
32
32
33 # helpers for processing parsed tree
33 # helpers for processing parsed tree
34 getsymbol = revsetlang.getsymbol
34 getsymbol = revsetlang.getsymbol
35 getstring = revsetlang.getstring
35 getstring = revsetlang.getstring
36 getinteger = revsetlang.getinteger
36 getinteger = revsetlang.getinteger
37 getboolean = revsetlang.getboolean
37 getboolean = revsetlang.getboolean
38 getlist = revsetlang.getlist
38 getlist = revsetlang.getlist
39 getrange = revsetlang.getrange
39 getrange = revsetlang.getrange
40 getargs = revsetlang.getargs
40 getargs = revsetlang.getargs
41 getargsdict = revsetlang.getargsdict
41 getargsdict = revsetlang.getargsdict
42
42
43 baseset = smartset.baseset
43 baseset = smartset.baseset
44 generatorset = smartset.generatorset
44 generatorset = smartset.generatorset
45 spanset = smartset.spanset
45 spanset = smartset.spanset
46 fullreposet = smartset.fullreposet
46 fullreposet = smartset.fullreposet
47
47
48 # Constants for ordering requirement, used in getset():
48 # Constants for ordering requirement, used in getset():
49 #
49 #
50 # If 'define', any nested functions and operations MAY change the ordering of
50 # If 'define', any nested functions and operations MAY change the ordering of
51 # the entries in the set (but if it changes the ordering, it MUST ALWAYS change
51 # the entries in the set (but if it changes the ordering, it MUST ALWAYS change
52 # it). If 'follow', any nested functions and operations MUST take the ordering
52 # it). If 'follow', any nested functions and operations MUST take the ordering
53 # specified by the first operand to the '&' operator.
53 # specified by the first operand to the '&' operator.
54 #
54 #
55 # For instance,
55 # For instance,
56 #
56 #
57 # X & (Y | Z)
57 # X & (Y | Z)
58 # ^ ^^^^^^^
58 # ^ ^^^^^^^
59 # | follow
59 # | follow
60 # define
60 # define
61 #
61 #
62 # will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
62 # will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
63 # of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
63 # of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
64 #
64 #
65 # 'any' means the order doesn't matter. For instance,
65 # 'any' means the order doesn't matter. For instance,
66 #
66 #
67 # (X & !Y) | ancestors(Z)
67 # (X & !Y) | ancestors(Z)
68 # ^ ^
68 # ^ ^
69 # any any
69 # any any
70 #
70 #
71 # For 'X & !Y', 'X' decides the order and 'Y' is subtracted from 'X', so the
71 # For 'X & !Y', 'X' decides the order and 'Y' is subtracted from 'X', so the
72 # order of 'Y' does not matter. For 'ancestors(Z)', Z's order does not matter
72 # order of 'Y' does not matter. For 'ancestors(Z)', Z's order does not matter
73 # since 'ancestors' does not care about the order of its argument.
73 # since 'ancestors' does not care about the order of its argument.
74 #
74 #
75 # Currently, most revsets do not care about the order, so 'define' is
75 # Currently, most revsets do not care about the order, so 'define' is
76 # equivalent to 'follow' for them, and the resulting order is based on the
76 # equivalent to 'follow' for them, and the resulting order is based on the
77 # 'subset' parameter passed down to them:
77 # 'subset' parameter passed down to them:
78 #
78 #
79 # m = revset.match(...)
79 # m = revset.match(...)
80 # m(repo, subset, order=defineorder)
80 # m(repo, subset, order=defineorder)
81 # ^^^^^^
81 # ^^^^^^
82 # For most revsets, 'define' means using the order this subset provides
82 # For most revsets, 'define' means using the order this subset provides
83 #
83 #
84 # There are a few revsets that always redefine the order if 'define' is
84 # There are a few revsets that always redefine the order if 'define' is
85 # specified: 'sort(X)', 'reverse(X)', 'x:y'.
85 # specified: 'sort(X)', 'reverse(X)', 'x:y'.
86 anyorder = 'any' # don't care about the order; could even be random-shuffled
86 anyorder = 'any' # don't care about the order; could even be random-shuffled
87 defineorder = 'define' # ALWAYS redefine, or ALWAYS follow the current order
87 defineorder = 'define' # ALWAYS redefine, or ALWAYS follow the current order
88 followorder = 'follow' # MUST follow the current order
88 followorder = 'follow' # MUST follow the current order
89
89
90 # helpers
90 # helpers
91
91
92 def getset(repo, subset, x, order=defineorder):
92 def getset(repo, subset, x, order=defineorder):
93 if not x:
93 if not x:
94 raise error.ParseError(_("missing argument"))
94 raise error.ParseError(_("missing argument"))
95 return methods[x[0]](repo, subset, *x[1:], order=order)
95 return methods[x[0]](repo, subset, *x[1:], order=order)
96
96
97 def _getrevsource(repo, r):
97 def _getrevsource(repo, r):
98 extra = repo[r].extra()
98 extra = repo[r].extra()
99 for label in ('source', 'transplant_source', 'rebase_source'):
99 for label in ('source', 'transplant_source', 'rebase_source'):
100 if label in extra:
100 if label in extra:
101 try:
101 try:
102 return repo[extra[label]].rev()
102 return repo[extra[label]].rev()
103 except error.RepoLookupError:
103 except error.RepoLookupError:
104 pass
104 pass
105 return None
105 return None
106
106
107 # operator methods
107 # operator methods
108
108
109 def stringset(repo, subset, x, order):
109 def stringset(repo, subset, x, order):
110 x = scmutil.intrev(repo[x])
110 x = scmutil.intrev(repo[x])
111 if (x in subset
111 if (x in subset
112 or x == node.nullrev and isinstance(subset, fullreposet)):
112 or x == node.nullrev and isinstance(subset, fullreposet)):
113 return baseset([x])
113 return baseset([x])
114 return baseset()
114 return baseset()
115
115
116 def rangeset(repo, subset, x, y, order):
116 def rangeset(repo, subset, x, y, order):
117 m = getset(repo, fullreposet(repo), x)
117 m = getset(repo, fullreposet(repo), x)
118 n = getset(repo, fullreposet(repo), y)
118 n = getset(repo, fullreposet(repo), y)
119
119
120 if not m or not n:
120 if not m or not n:
121 return baseset()
121 return baseset()
122 return _makerangeset(repo, subset, m.first(), n.last(), order)
122 return _makerangeset(repo, subset, m.first(), n.last(), order)
123
123
124 def rangeall(repo, subset, x, order):
124 def rangeall(repo, subset, x, order):
125 assert x is None
125 assert x is None
126 return _makerangeset(repo, subset, 0, len(repo) - 1, order)
126 return _makerangeset(repo, subset, 0, len(repo) - 1, order)
127
127
128 def rangepre(repo, subset, y, order):
128 def rangepre(repo, subset, y, order):
129 # ':y' can't be rewritten to '0:y' since '0' may be hidden
129 # ':y' can't be rewritten to '0:y' since '0' may be hidden
130 n = getset(repo, fullreposet(repo), y)
130 n = getset(repo, fullreposet(repo), y)
131 if not n:
131 if not n:
132 return baseset()
132 return baseset()
133 return _makerangeset(repo, subset, 0, n.last(), order)
133 return _makerangeset(repo, subset, 0, n.last(), order)
134
134
135 def rangepost(repo, subset, x, order):
135 def rangepost(repo, subset, x, order):
136 m = getset(repo, fullreposet(repo), x)
136 m = getset(repo, fullreposet(repo), x)
137 if not m:
137 if not m:
138 return baseset()
138 return baseset()
139 return _makerangeset(repo, subset, m.first(), len(repo) - 1, order)
139 return _makerangeset(repo, subset, m.first(), len(repo) - 1, order)
140
140
141 def _makerangeset(repo, subset, m, n, order):
141 def _makerangeset(repo, subset, m, n, order):
142 if m == n:
142 if m == n:
143 r = baseset([m])
143 r = baseset([m])
144 elif n == node.wdirrev:
144 elif n == node.wdirrev:
145 r = spanset(repo, m, len(repo)) + baseset([n])
145 r = spanset(repo, m, len(repo)) + baseset([n])
146 elif m == node.wdirrev:
146 elif m == node.wdirrev:
147 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
147 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
148 elif m < n:
148 elif m < n:
149 r = spanset(repo, m, n + 1)
149 r = spanset(repo, m, n + 1)
150 else:
150 else:
151 r = spanset(repo, m, n - 1)
151 r = spanset(repo, m, n - 1)
152
152
153 if order == defineorder:
153 if order == defineorder:
154 return r & subset
154 return r & subset
155 else:
155 else:
156 # carrying the sorting over when possible would be more efficient
156 # carrying the sorting over when possible would be more efficient
157 return subset & r
157 return subset & r
158
158
159 def dagrange(repo, subset, x, y, order):
159 def dagrange(repo, subset, x, y, order):
160 r = fullreposet(repo)
160 r = fullreposet(repo)
161 xs = dagop.reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
161 xs = dagop.reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
162 includepath=True)
162 includepath=True)
163 return subset & xs
163 return subset & xs
164
164
165 def andset(repo, subset, x, y, order):
165 def andset(repo, subset, x, y, order):
166 if order == anyorder:
166 if order == anyorder:
167 yorder = anyorder
167 yorder = anyorder
168 else:
168 else:
169 yorder = followorder
169 yorder = followorder
170 return getset(repo, getset(repo, subset, x, order), y, yorder)
170 return getset(repo, getset(repo, subset, x, order), y, yorder)
171
171
172 def andsmallyset(repo, subset, x, y, order):
172 def andsmallyset(repo, subset, x, y, order):
173 # 'andsmally(x, y)' is equivalent to 'and(x, y)', but faster when y is small
173 # 'andsmally(x, y)' is equivalent to 'and(x, y)', but faster when y is small
174 if order == anyorder:
174 if order == anyorder:
175 yorder = anyorder
175 yorder = anyorder
176 else:
176 else:
177 yorder = followorder
177 yorder = followorder
178 return getset(repo, getset(repo, subset, y, yorder), x, order)
178 return getset(repo, getset(repo, subset, y, yorder), x, order)
179
179
180 def differenceset(repo, subset, x, y, order):
180 def differenceset(repo, subset, x, y, order):
181 return getset(repo, subset, x, order) - getset(repo, subset, y, anyorder)
181 return getset(repo, subset, x, order) - getset(repo, subset, y, anyorder)
182
182
183 def _orsetlist(repo, subset, xs, order):
183 def _orsetlist(repo, subset, xs, order):
184 assert xs
184 assert xs
185 if len(xs) == 1:
185 if len(xs) == 1:
186 return getset(repo, subset, xs[0], order)
186 return getset(repo, subset, xs[0], order)
187 p = len(xs) // 2
187 p = len(xs) // 2
188 a = _orsetlist(repo, subset, xs[:p], order)
188 a = _orsetlist(repo, subset, xs[:p], order)
189 b = _orsetlist(repo, subset, xs[p:], order)
189 b = _orsetlist(repo, subset, xs[p:], order)
190 return a + b
190 return a + b
191
191
192 def orset(repo, subset, x, order):
192 def orset(repo, subset, x, order):
193 xs = getlist(x)
193 xs = getlist(x)
194 if order == followorder:
194 if order == followorder:
195 # slow path to take the subset order
195 # slow path to take the subset order
196 return subset & _orsetlist(repo, fullreposet(repo), xs, anyorder)
196 return subset & _orsetlist(repo, fullreposet(repo), xs, anyorder)
197 else:
197 else:
198 return _orsetlist(repo, subset, xs, order)
198 return _orsetlist(repo, subset, xs, order)
199
199
200 def notset(repo, subset, x, order):
200 def notset(repo, subset, x, order):
201 return subset - getset(repo, subset, x, anyorder)
201 return subset - getset(repo, subset, x, anyorder)
202
202
203 def relationset(repo, subset, x, y, order):
203 def relationset(repo, subset, x, y, order):
204 raise error.ParseError(_("can't use a relation in this context"))
204 raise error.ParseError(_("can't use a relation in this context"))
205
205
206 def relsubscriptset(repo, subset, x, y, z, order):
206 def relsubscriptset(repo, subset, x, y, z, order):
207 # this is a pretty basic implementation of the 'x#y[z]' operator, still
207 # this is a pretty basic implementation of the 'x#y[z]' operator, still
208 # experimental and so undocumented. See the wiki for further ideas.
208 # experimental and so undocumented. See the wiki for further ideas.
209 # https://www.mercurial-scm.org/wiki/RevsetOperatorPlan
209 # https://www.mercurial-scm.org/wiki/RevsetOperatorPlan
210 rel = getsymbol(y)
210 rel = getsymbol(y)
211 n = getinteger(z, _("relation subscript must be an integer"))
211 n = getinteger(z, _("relation subscript must be an integer"))
212
212
213 # TODO: perhaps this should be a table of relation functions
213 # TODO: perhaps this should be a table of relation functions
214 if rel in ('g', 'generations'):
214 if rel in ('g', 'generations'):
215 # TODO: support range, rewrite tests, and drop startdepth argument
215 # TODO: support range, rewrite tests, and drop startdepth argument
216 # from ancestors() and descendants() predicates
216 # from ancestors() and descendants() predicates
217 if n <= 0:
217 if n <= 0:
218 n = -n
218 n = -n
219 return _ancestors(repo, subset, x, startdepth=n, stopdepth=n + 1)
219 return _ancestors(repo, subset, x, startdepth=n, stopdepth=n + 1)
220 else:
220 else:
221 return _descendants(repo, subset, x, startdepth=n, stopdepth=n + 1)
221 return _descendants(repo, subset, x, startdepth=n, stopdepth=n + 1)
222
222
223 raise error.UnknownIdentifier(rel, ['generations'])
223 raise error.UnknownIdentifier(rel, ['generations'])
224
224
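# A small sketch of the experimental relation subscript handled above; the
# repository path is a hypothetical assumption, not part of this file.
# Non-positive subscripts walk ancestors and positive ones walk descendants,
# each restricted to revisions exactly |n| generations away:
from mercurial import hg as _hg, ui as _uimod

_genrepo = _hg.repository(_uimod.ui.load(), '/path/to/repo')  # hypothetical
print(_genrepo.revs('tip#generations[-2]'))  # exactly two generations back
print(_genrepo.revs('tip#generations[1]'))   # children of tip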
225 def subscriptset(repo, subset, x, y, order):
225 def subscriptset(repo, subset, x, y, order):
226 raise error.ParseError(_("can't use a subscript in this context"))
226 raise error.ParseError(_("can't use a subscript in this context"))
227
227
228 def listset(repo, subset, *xs, **opts):
228 def listset(repo, subset, *xs, **opts):
229 raise error.ParseError(_("can't use a list in this context"),
229 raise error.ParseError(_("can't use a list in this context"),
230 hint=_('see hg help "revsets.x or y"'))
230 hint=_('see hg help "revsets.x or y"'))
231
231
232 def keyvaluepair(repo, subset, k, v, order):
232 def keyvaluepair(repo, subset, k, v, order):
233 raise error.ParseError(_("can't use a key-value pair in this context"))
233 raise error.ParseError(_("can't use a key-value pair in this context"))
234
234
235 def func(repo, subset, a, b, order):
235 def func(repo, subset, a, b, order):
236 f = getsymbol(a)
236 f = getsymbol(a)
237 if f in symbols:
237 if f in symbols:
238 func = symbols[f]
238 func = symbols[f]
239 if getattr(func, '_takeorder', False):
239 if getattr(func, '_takeorder', False):
240 return func(repo, subset, b, order)
240 return func(repo, subset, b, order)
241 return func(repo, subset, b)
241 return func(repo, subset, b)
242
242
243 keep = lambda fn: getattr(fn, '__doc__', None) is not None
243 keep = lambda fn: getattr(fn, '__doc__', None) is not None
244
244
245 syms = [s for (s, fn) in symbols.items() if keep(fn)]
245 syms = [s for (s, fn) in symbols.items() if keep(fn)]
246 raise error.UnknownIdentifier(f, syms)
246 raise error.UnknownIdentifier(f, syms)
247
247
248 # functions
248 # functions
249
249
250 # symbols are callables like:
250 # symbols are callables like:
251 # fn(repo, subset, x)
251 # fn(repo, subset, x)
252 # with:
252 # with:
253 # repo - current repository instance
253 # repo - current repository instance
254 # subset - of revisions to be examined
254 # subset - of revisions to be examined
255 # x - argument in tree form
255 # x - argument in tree form
256 symbols = revsetlang.symbols
256 symbols = revsetlang.symbols
257
257
258 # symbols which can't be used for a DoS attack for any given input
258 # symbols which can't be used for a DoS attack for any given input
259 # (e.g. those which accept regexes as plain strings shouldn't be included)
259 # (e.g. those which accept regexes as plain strings shouldn't be included)
260 # functions that just return a lot of changesets (like all) don't count here
260 # functions that just return a lot of changesets (like all) don't count here
261 safesymbols = set()
261 safesymbols = set()
262
262
263 predicate = registrar.revsetpredicate()
263 predicate = registrar.revsetpredicate()
264
264
265 @predicate('_destupdate')
265 @predicate('_destupdate')
266 def _destupdate(repo, subset, x):
266 def _destupdate(repo, subset, x):
267 # experimental revset for update destination
267 # experimental revset for update destination
268 args = getargsdict(x, 'limit', 'clean')
268 args = getargsdict(x, 'limit', 'clean')
269 return subset & baseset([destutil.destupdate(repo, **args)[0]])
269 return subset & baseset([destutil.destupdate(repo, **args)[0]])
270
270
271 @predicate('_destmerge')
271 @predicate('_destmerge')
272 def _destmerge(repo, subset, x):
272 def _destmerge(repo, subset, x):
273 # experimental revset for merge destination
273 # experimental revset for merge destination
274 sourceset = None
274 sourceset = None
275 if x is not None:
275 if x is not None:
276 sourceset = getset(repo, fullreposet(repo), x)
276 sourceset = getset(repo, fullreposet(repo), x)
277 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
277 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
278
278
279 @predicate('adds(pattern)', safe=True, weight=30)
279 @predicate('adds(pattern)', safe=True, weight=30)
280 def adds(repo, subset, x):
280 def adds(repo, subset, x):
281 """Changesets that add a file matching pattern.
281 """Changesets that add a file matching pattern.
282
282
283 The pattern without explicit kind like ``glob:`` is expected to be
283 The pattern without explicit kind like ``glob:`` is expected to be
284 relative to the current directory and match against a file or a
284 relative to the current directory and match against a file or a
285 directory.
285 directory.
286 """
286 """
287 # i18n: "adds" is a keyword
287 # i18n: "adds" is a keyword
288 pat = getstring(x, _("adds requires a pattern"))
288 pat = getstring(x, _("adds requires a pattern"))
289 return checkstatus(repo, subset, pat, 1)
289 return checkstatus(repo, subset, pat, 1)
290
290
291 @predicate('ancestor(*changeset)', safe=True, weight=0.5)
291 @predicate('ancestor(*changeset)', safe=True, weight=0.5)
292 def ancestor(repo, subset, x):
292 def ancestor(repo, subset, x):
293 """A greatest common ancestor of the changesets.
293 """A greatest common ancestor of the changesets.
294
294
295 Accepts 0 or more changesets.
295 Accepts 0 or more changesets.
296 Will return an empty list when passed no args.
296 Will return an empty list when passed no args.
297 Greatest common ancestor of a single changeset is that changeset.
297 Greatest common ancestor of a single changeset is that changeset.
298 """
298 """
299 # i18n: "ancestor" is a keyword
299 # i18n: "ancestor" is a keyword
300 l = getlist(x)
300 l = getlist(x)
301 rl = fullreposet(repo)
301 rl = fullreposet(repo)
302 anc = None
302 anc = None
303
303
304 # (getset(repo, rl, i) for i in l) generates a list of lists
304 # (getset(repo, rl, i) for i in l) generates a list of lists
305 for revs in (getset(repo, rl, i) for i in l):
305 for revs in (getset(repo, rl, i) for i in l):
306 for r in revs:
306 for r in revs:
307 if anc is None:
307 if anc is None:
308 anc = repo[r]
308 anc = repo[r]
309 else:
309 else:
310 anc = anc.ancestor(repo[r])
310 anc = anc.ancestor(repo[r])
311
311
312 if anc is not None and anc.rev() in subset:
312 if anc is not None and anc.rev() in subset:
313 return baseset([anc.rev()])
313 return baseset([anc.rev()])
314 return baseset()
314 return baseset()
315
315
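# Editorial usage sketch (not part of the original source): with two or more
# arguments this yields the greatest common ancestor, e.g.
#   hg log -r "ancestor(default, stable)"
# and with a single changeset it returns that changeset itself.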
316 def _ancestors(repo, subset, x, followfirst=False, startdepth=None,
316 def _ancestors(repo, subset, x, followfirst=False, startdepth=None,
317 stopdepth=None):
317 stopdepth=None):
318 heads = getset(repo, fullreposet(repo), x)
318 heads = getset(repo, fullreposet(repo), x)
319 if not heads:
319 if not heads:
320 return baseset()
320 return baseset()
321 s = dagop.revancestors(repo, heads, followfirst, startdepth, stopdepth)
321 s = dagop.revancestors(repo, heads, followfirst, startdepth, stopdepth)
322 return subset & s
322 return subset & s
323
323
324 @predicate('ancestors(set[, depth])', safe=True)
324 @predicate('ancestors(set[, depth])', safe=True)
325 def ancestors(repo, subset, x):
325 def ancestors(repo, subset, x):
326 """Changesets that are ancestors of changesets in set, including the
326 """Changesets that are ancestors of changesets in set, including the
327 given changesets themselves.
327 given changesets themselves.
328
328
329 If depth is specified, the result only includes changesets up to
329 If depth is specified, the result only includes changesets up to
330 the specified generation.
330 the specified generation.
331 """
331 """
332 # startdepth is for internal use only until we can decide the UI
332 # startdepth is for internal use only until we can decide the UI
333 args = getargsdict(x, 'ancestors', 'set depth startdepth')
333 args = getargsdict(x, 'ancestors', 'set depth startdepth')
334 if 'set' not in args:
334 if 'set' not in args:
335 # i18n: "ancestors" is a keyword
335 # i18n: "ancestors" is a keyword
336 raise error.ParseError(_('ancestors takes at least 1 argument'))
336 raise error.ParseError(_('ancestors takes at least 1 argument'))
337 startdepth = stopdepth = None
337 startdepth = stopdepth = None
338 if 'startdepth' in args:
338 if 'startdepth' in args:
339 n = getinteger(args['startdepth'],
339 n = getinteger(args['startdepth'],
340 "ancestors expects an integer startdepth")
340 "ancestors expects an integer startdepth")
341 if n < 0:
341 if n < 0:
342 raise error.ParseError("negative startdepth")
342 raise error.ParseError("negative startdepth")
343 startdepth = n
343 startdepth = n
344 if 'depth' in args:
344 if 'depth' in args:
345 # i18n: "ancestors" is a keyword
345 # i18n: "ancestors" is a keyword
346 n = getinteger(args['depth'], _("ancestors expects an integer depth"))
346 n = getinteger(args['depth'], _("ancestors expects an integer depth"))
347 if n < 0:
347 if n < 0:
348 raise error.ParseError(_("negative depth"))
348 raise error.ParseError(_("negative depth"))
349 stopdepth = n + 1
349 stopdepth = n + 1
350 return _ancestors(repo, subset, args['set'],
350 return _ancestors(repo, subset, args['set'],
351 startdepth=startdepth, stopdepth=stopdepth)
351 startdepth=startdepth, stopdepth=stopdepth)
352
352
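# Editorial usage sketch (not part of the original source): with the optional
# depth argument, "ancestors(tip, 2)" selects tip, its parents and its
# grandparents; internally depth=n maps to stopdepth=n+1 as computed above.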
353 @predicate('_firstancestors', safe=True)
353 @predicate('_firstancestors', safe=True)
354 def _firstancestors(repo, subset, x):
354 def _firstancestors(repo, subset, x):
355 # ``_firstancestors(set)``
355 # ``_firstancestors(set)``
356 # Like ``ancestors(set)`` but follows only the first parents.
356 # Like ``ancestors(set)`` but follows only the first parents.
357 return _ancestors(repo, subset, x, followfirst=True)
357 return _ancestors(repo, subset, x, followfirst=True)
358
358
359 def _childrenspec(repo, subset, x, n, order):
359 def _childrenspec(repo, subset, x, n, order):
360 """Changesets that are the Nth child of a changeset
360 """Changesets that are the Nth child of a changeset
361 in set.
361 in set.
362 """
362 """
363 cs = set()
363 cs = set()
364 for r in getset(repo, fullreposet(repo), x):
364 for r in getset(repo, fullreposet(repo), x):
365 for i in range(n):
365 for i in range(n):
366 c = repo[r].children()
366 c = repo[r].children()
367 if len(c) == 0:
367 if len(c) == 0:
368 break
368 break
369 if len(c) > 1:
369 if len(c) > 1:
370 raise error.RepoLookupError(
370 raise error.RepoLookupError(
371 _("revision in set has more than one child"))
371 _("revision in set has more than one child"))
372 r = c[0].rev()
372 r = c[0].rev()
373 else:
373 else:
374 cs.add(r)
374 cs.add(r)
375 return subset & cs
375 return subset & cs
376
376
377 def ancestorspec(repo, subset, x, n, order):
377 def ancestorspec(repo, subset, x, n, order):
378 """``set~n``
378 """``set~n``
379 Changesets that are the Nth ancestor (first parents only) of a changeset
379 Changesets that are the Nth ancestor (first parents only) of a changeset
380 in set.
380 in set.
381 """
381 """
382 n = getinteger(n, _("~ expects a number"))
382 n = getinteger(n, _("~ expects a number"))
383 if n < 0:
383 if n < 0:
384 # children lookup
384 # children lookup
385 return _childrenspec(repo, subset, x, -n, order)
385 return _childrenspec(repo, subset, x, -n, order)
386 ps = set()
386 ps = set()
387 cl = repo.changelog
387 cl = repo.changelog
388 for r in getset(repo, fullreposet(repo), x):
388 for r in getset(repo, fullreposet(repo), x):
389 for i in range(n):
389 for i in range(n):
390 try:
390 try:
391 r = cl.parentrevs(r)[0]
391 r = cl.parentrevs(r)[0]
392 except error.WdirUnsupported:
392 except error.WdirUnsupported:
393 r = repo[r].parents()[0].rev()
393 r = repo[r].parents()[0].rev()
394 ps.add(r)
394 ps.add(r)
395 return subset & ps
395 return subset & ps
396
396
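# Editorial usage sketch (not part of the original source): "tip~3" selects
# the third first-parent ancestor of tip, while a negative count such as
# "tip~-1" falls back to the children lookup handled by _childrenspec above.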
397 @predicate('author(string)', safe=True, weight=10)
397 @predicate('author(string)', safe=True, weight=10)
398 def author(repo, subset, x):
398 def author(repo, subset, x):
399 """Alias for ``user(string)``.
399 """Alias for ``user(string)``.
400 """
400 """
401 # i18n: "author" is a keyword
401 # i18n: "author" is a keyword
402 n = getstring(x, _("author requires a string"))
402 n = getstring(x, _("author requires a string"))
403 kind, pattern, matcher = _substringmatcher(n, casesensitive=False)
403 kind, pattern, matcher = _substringmatcher(n, casesensitive=False)
404 return subset.filter(lambda x: matcher(repo[x].user()),
404 return subset.filter(lambda x: matcher(repo[x].user()),
405 condrepr=('<user %r>', n))
405 condrepr=('<user %r>', n))
406
406
407 @predicate('bisect(string)', safe=True)
407 @predicate('bisect(string)', safe=True)
408 def bisect(repo, subset, x):
408 def bisect(repo, subset, x):
409 """Changesets marked in the specified bisect status:
409 """Changesets marked in the specified bisect status:
410
410
411 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
411 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
412 - ``goods``, ``bads`` : csets topologically good/bad
412 - ``goods``, ``bads`` : csets topologically good/bad
413 - ``range`` : csets taking part in the bisection
413 - ``range`` : csets taking part in the bisection
414 - ``pruned`` : csets that are goods, bads or skipped
414 - ``pruned`` : csets that are goods, bads or skipped
415 - ``untested`` : csets whose fate is yet unknown
415 - ``untested`` : csets whose fate is yet unknown
416 - ``ignored`` : csets ignored due to DAG topology
416 - ``ignored`` : csets ignored due to DAG topology
417 - ``current`` : the cset currently being bisected
417 - ``current`` : the cset currently being bisected
418 """
418 """
419 # i18n: "bisect" is a keyword
419 # i18n: "bisect" is a keyword
420 status = getstring(x, _("bisect requires a string")).lower()
420 status = getstring(x, _("bisect requires a string")).lower()
421 state = set(hbisect.get(repo, status))
421 state = set(hbisect.get(repo, status))
422 return subset & state
422 return subset & state
423
423
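# Editorial usage sketch (not part of the original source): during a bisection
# the recorded state can be inspected with, e.g.,
#   hg log -r "bisect(good)"      # revisions explicitly marked good
#   hg log -r "bisect(untested)"  # candidates whose fate is still unknown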
424 # Backward-compatibility
424 # Backward-compatibility
425 # - no help entry so that we do not advertise it any more
425 # - no help entry so that we do not advertise it any more
426 @predicate('bisected', safe=True)
426 @predicate('bisected', safe=True)
427 def bisected(repo, subset, x):
427 def bisected(repo, subset, x):
428 return bisect(repo, subset, x)
428 return bisect(repo, subset, x)
429
429
430 @predicate('bookmark([name])', safe=True)
430 @predicate('bookmark([name])', safe=True)
431 def bookmark(repo, subset, x):
431 def bookmark(repo, subset, x):
432 """The named bookmark or all bookmarks.
432 """The named bookmark or all bookmarks.
433
433
434 Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
434 Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
435 """
435 """
436 # i18n: "bookmark" is a keyword
436 # i18n: "bookmark" is a keyword
437 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
437 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
438 if args:
438 if args:
439 bm = getstring(args[0],
439 bm = getstring(args[0],
440 # i18n: "bookmark" is a keyword
440 # i18n: "bookmark" is a keyword
441 _('the argument to bookmark must be a string'))
441 _('the argument to bookmark must be a string'))
442 kind, pattern, matcher = util.stringmatcher(bm)
442 kind, pattern, matcher = util.stringmatcher(bm)
443 bms = set()
443 bms = set()
444 if kind == 'literal':
444 if kind == 'literal':
445 bmrev = repo._bookmarks.get(pattern, None)
445 bmrev = repo._bookmarks.get(pattern, None)
446 if not bmrev:
446 if not bmrev:
447 raise error.RepoLookupError(_("bookmark '%s' does not exist")
447 raise error.RepoLookupError(_("bookmark '%s' does not exist")
448 % pattern)
448 % pattern)
449 bms.add(repo[bmrev].rev())
449 bms.add(repo[bmrev].rev())
450 else:
450 else:
451 matchrevs = set()
451 matchrevs = set()
452 for name, bmrev in repo._bookmarks.iteritems():
452 for name, bmrev in repo._bookmarks.iteritems():
453 if matcher(name):
453 if matcher(name):
454 matchrevs.add(bmrev)
454 matchrevs.add(bmrev)
455 if not matchrevs:
455 if not matchrevs:
456 raise error.RepoLookupError(_("no bookmarks exist"
456 raise error.RepoLookupError(_("no bookmarks exist"
457 " that match '%s'") % pattern)
457 " that match '%s'") % pattern)
458 for bmrev in matchrevs:
458 for bmrev in matchrevs:
459 bms.add(repo[bmrev].rev())
459 bms.add(repo[bmrev].rev())
460 else:
460 else:
461 bms = {repo[r].rev() for r in repo._bookmarks.values()}
461 bms = {repo[r].rev() for r in repo._bookmarks.values()}
462 bms -= {node.nullrev}
462 bms -= {node.nullrev}
463 return subset & bms
463 return subset & bms
464
464
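# Editorial usage sketch (not part of the original source): "bookmark()"
# selects every bookmarked revision, while a pattern such as
# "bookmark('re:release-.*')" selects only bookmarks matching the regex,
# mirroring the literal/matcher branches above.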
465 @predicate('branch(string or set)', safe=True, weight=10)
465 @predicate('branch(string or set)', safe=True, weight=10)
466 def branch(repo, subset, x):
466 def branch(repo, subset, x):
467 """
467 """
468 All changesets belonging to the given branch or the branches of the given
468 All changesets belonging to the given branch or the branches of the given
469 changesets.
469 changesets.
470
470
471 Pattern matching is supported for `string`. See
471 Pattern matching is supported for `string`. See
472 :hg:`help revisions.patterns`.
472 :hg:`help revisions.patterns`.
473 """
473 """
474 getbi = repo.revbranchcache().branchinfo
474 getbi = repo.revbranchcache().branchinfo
475 def getbranch(r):
475 def getbranch(r):
476 try:
476 try:
477 return getbi(r)[0]
477 return getbi(r)[0]
478 except error.WdirUnsupported:
478 except error.WdirUnsupported:
479 return repo[r].branch()
479 return repo[r].branch()
480
480
481 try:
481 try:
482 b = getstring(x, '')
482 b = getstring(x, '')
483 except error.ParseError:
483 except error.ParseError:
484 # not a string, but another revspec, e.g. tip()
484 # not a string, but another revspec, e.g. tip()
485 pass
485 pass
486 else:
486 else:
487 kind, pattern, matcher = util.stringmatcher(b)
487 kind, pattern, matcher = util.stringmatcher(b)
488 if kind == 'literal':
488 if kind == 'literal':
489 # note: falls through to the revspec case if no branch with
489 # note: falls through to the revspec case if no branch with
490 # this name exists and pattern kind is not specified explicitly
490 # this name exists and pattern kind is not specified explicitly
491 if pattern in repo.branchmap():
491 if pattern in repo.branchmap():
492 return subset.filter(lambda r: matcher(getbranch(r)),
492 return subset.filter(lambda r: matcher(getbranch(r)),
493 condrepr=('<branch %r>', b))
493 condrepr=('<branch %r>', b))
494 if b.startswith('literal:'):
494 if b.startswith('literal:'):
495 raise error.RepoLookupError(_("branch '%s' does not exist")
495 raise error.RepoLookupError(_("branch '%s' does not exist")
496 % pattern)
496 % pattern)
497 else:
497 else:
498 return subset.filter(lambda r: matcher(getbranch(r)),
498 return subset.filter(lambda r: matcher(getbranch(r)),
499 condrepr=('<branch %r>', b))
499 condrepr=('<branch %r>', b))
500
500
501 s = getset(repo, fullreposet(repo), x)
501 s = getset(repo, fullreposet(repo), x)
502 b = set()
502 b = set()
503 for r in s:
503 for r in s:
504 b.add(getbranch(r))
504 b.add(getbranch(r))
505 c = s.__contains__
505 c = s.__contains__
506 return subset.filter(lambda r: c(r) or getbranch(r) in b,
506 return subset.filter(lambda r: c(r) or getbranch(r) in b,
507 condrepr=lambda: '<branch %r>' % sorted(b))
507 condrepr=lambda: '<branch %r>' % sorted(b))
508
508
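# Editorial usage sketch (not part of the original source): "branch(default)"
# selects all changesets on the default branch, and "branch(tip)" selects
# every changeset on the branch that tip belongs to (the revspec fallthrough
# handled above).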
509 @predicate('bumped()', safe=True)
509 @predicate('bumped()', safe=True)
510 def bumped(repo, subset, x):
510 def bumped(repo, subset, x):
511 msg = ("'bumped()' is deprecated, "
511 msg = ("'bumped()' is deprecated, "
512 "use 'phasedivergent()'")
512 "use 'phasedivergent()'")
513 repo.ui.deprecwarn(msg, '4.4')
513 repo.ui.deprecwarn(msg, '4.4')
514
514
515 return phasedivergent(repo, subset, x)
515 return phasedivergent(repo, subset, x)
516
516
517 @predicate('phasedivergent()', safe=True)
517 @predicate('phasedivergent()', safe=True)
518 def phasedivergent(repo, subset, x):
518 def phasedivergent(repo, subset, x):
519 """Mutable changesets marked as successors of public changesets.
519 """Mutable changesets marked as successors of public changesets.
520
520
521 Only non-public and non-obsolete changesets can be `phasedivergent`.
521 Only non-public and non-obsolete changesets can be `phasedivergent`.
522 (EXPERIMENTAL)
522 (EXPERIMENTAL)
523 """
523 """
524 # i18n: "phasedivergent" is a keyword
524 # i18n: "phasedivergent" is a keyword
525 getargs(x, 0, 0, _("phasedivergent takes no arguments"))
525 getargs(x, 0, 0, _("phasedivergent takes no arguments"))
526 phasedivergent = obsmod.getrevs(repo, 'phasedivergent')
526 phasedivergent = obsmod.getrevs(repo, 'phasedivergent')
527 return subset & phasedivergent
527 return subset & phasedivergent
528
528
529 @predicate('bundle()', safe=True)
529 @predicate('bundle()', safe=True)
530 def bundle(repo, subset, x):
530 def bundle(repo, subset, x):
531 """Changesets in the bundle.
531 """Changesets in the bundle.
532
532
533 Bundle must be specified by the -R option."""
533 Bundle must be specified by the -R option."""
534
534
535 try:
535 try:
536 bundlerevs = repo.changelog.bundlerevs
536 bundlerevs = repo.changelog.bundlerevs
537 except AttributeError:
537 except AttributeError:
538 raise error.Abort(_("no bundle provided - specify with -R"))
538 raise error.Abort(_("no bundle provided - specify with -R"))
539 return subset & bundlerevs
539 return subset & bundlerevs
540
540
541 def checkstatus(repo, subset, pat, field):
541 def checkstatus(repo, subset, pat, field):
542 hasset = matchmod.patkind(pat) == 'set'
542 hasset = matchmod.patkind(pat) == 'set'
543
543
544 mcache = [None]
544 mcache = [None]
545 def matches(x):
545 def matches(x):
546 c = repo[x]
546 c = repo[x]
547 if not mcache[0] or hasset:
547 if not mcache[0] or hasset:
548 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
548 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
549 m = mcache[0]
549 m = mcache[0]
550 fname = None
550 fname = None
551 if not m.anypats() and len(m.files()) == 1:
551 if not m.anypats() and len(m.files()) == 1:
552 fname = m.files()[0]
552 fname = m.files()[0]
553 if fname is not None:
553 if fname is not None:
554 if fname not in c.files():
554 if fname not in c.files():
555 return False
555 return False
556 else:
556 else:
557 for f in c.files():
557 for f in c.files():
558 if m(f):
558 if m(f):
559 break
559 break
560 else:
560 else:
561 return False
561 return False
562 files = repo.status(c.p1().node(), c.node())[field]
562 files = repo.status(c.p1().node(), c.node())[field]
563 if fname is not None:
563 if fname is not None:
564 if fname in files:
564 if fname in files:
565 return True
565 return True
566 else:
566 else:
567 for f in files:
567 for f in files:
568 if m(f):
568 if m(f):
569 return True
569 return True
570
570
571 return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
571 return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
572
572
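# Editorial note (not part of the original source): the 'field' argument is an
# index into the status tuple returned by repo.status() -- adds() above passes
# 1, i.e. the list of added files. Matching is short-circuited to a plain
# filename lookup when the pattern names exactly one file.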
573 def _children(repo, subset, parentset):
573 def _children(repo, subset, parentset):
574 if not parentset:
574 if not parentset:
575 return baseset()
575 return baseset()
576 cs = set()
576 cs = set()
577 pr = repo.changelog.parentrevs
577 pr = repo.changelog.parentrevs
578 minrev = parentset.min()
578 minrev = parentset.min()
579 nullrev = node.nullrev
579 nullrev = node.nullrev
580 for r in subset:
580 for r in subset:
581 if r <= minrev:
581 if r <= minrev:
582 continue
582 continue
583 p1, p2 = pr(r)
583 p1, p2 = pr(r)
584 if p1 in parentset:
584 if p1 in parentset:
585 cs.add(r)
585 cs.add(r)
586 if p2 != nullrev and p2 in parentset:
586 if p2 != nullrev and p2 in parentset:
587 cs.add(r)
587 cs.add(r)
588 return baseset(cs)
588 return baseset(cs)
589
589
590 @predicate('children(set)', safe=True)
590 @predicate('children(set)', safe=True)
591 def children(repo, subset, x):
591 def children(repo, subset, x):
592 """Child changesets of changesets in set.
592 """Child changesets of changesets in set.
593 """
593 """
594 s = getset(repo, fullreposet(repo), x)
594 s = getset(repo, fullreposet(repo), x)
595 cs = _children(repo, subset, s)
595 cs = _children(repo, subset, s)
596 return subset & cs
596 return subset & cs
597
597
598 @predicate('closed()', safe=True, weight=10)
598 @predicate('closed()', safe=True, weight=10)
599 def closed(repo, subset, x):
599 def closed(repo, subset, x):
600 """Changeset is closed.
600 """Changeset is closed.
601 """
601 """
602 # i18n: "closed" is a keyword
602 # i18n: "closed" is a keyword
603 getargs(x, 0, 0, _("closed takes no arguments"))
603 getargs(x, 0, 0, _("closed takes no arguments"))
604 return subset.filter(lambda r: repo[r].closesbranch(),
604 return subset.filter(lambda r: repo[r].closesbranch(),
605 condrepr='<branch closed>')
605 condrepr='<branch closed>')
606
606
607 @predicate('contains(pattern)', weight=100)
607 @predicate('contains(pattern)', weight=100)
608 def contains(repo, subset, x):
608 def contains(repo, subset, x):
609 """The revision's manifest contains a file matching pattern (but might not
609 """The revision's manifest contains a file matching pattern (but might not
610 modify it). See :hg:`help patterns` for information about file patterns.
610 modify it). See :hg:`help patterns` for information about file patterns.
611
611
612 The pattern without explicit kind like ``glob:`` is expected to be
612 The pattern without explicit kind like ``glob:`` is expected to be
613 relative to the current directory and match against a file exactly
613 relative to the current directory and match against a file exactly
614 for efficiency.
614 for efficiency.
615 """
615 """
616 # i18n: "contains" is a keyword
616 # i18n: "contains" is a keyword
617 pat = getstring(x, _("contains requires a pattern"))
617 pat = getstring(x, _("contains requires a pattern"))
618
618
619 def matches(x):
619 def matches(x):
620 if not matchmod.patkind(pat):
620 if not matchmod.patkind(pat):
621 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
621 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
622 if pats in repo[x]:
622 if pats in repo[x]:
623 return True
623 return True
624 else:
624 else:
625 c = repo[x]
625 c = repo[x]
626 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
626 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
627 for f in c.manifest():
627 for f in c.manifest():
628 if m(f):
628 if m(f):
629 return True
629 return True
630 return False
630 return False
631
631
632 return subset.filter(matches, condrepr=('<contains %r>', pat))
632 return subset.filter(matches, condrepr=('<contains %r>', pat))
633
633
634 @predicate('converted([id])', safe=True)
634 @predicate('converted([id])', safe=True)
635 def converted(repo, subset, x):
635 def converted(repo, subset, x):
636 """Changesets converted from the given identifier in the old repository if
636 """Changesets converted from the given identifier in the old repository if
637 present, or all converted changesets if no identifier is specified.
637 present, or all converted changesets if no identifier is specified.
638 """
638 """
639
639
640 # There is exactly no chance of resolving the revision, so do a simple
640 # There is exactly no chance of resolving the revision, so do a simple
641 # string compare and hope for the best
641 # string compare and hope for the best
642
642
643 rev = None
643 rev = None
644 # i18n: "converted" is a keyword
644 # i18n: "converted" is a keyword
645 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
645 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
646 if l:
646 if l:
647 # i18n: "converted" is a keyword
647 # i18n: "converted" is a keyword
648 rev = getstring(l[0], _('converted requires a revision'))
648 rev = getstring(l[0], _('converted requires a revision'))
649
649
650 def _matchvalue(r):
650 def _matchvalue(r):
651 source = repo[r].extra().get('convert_revision', None)
651 source = repo[r].extra().get('convert_revision', None)
652 return source is not None and (rev is None or source.startswith(rev))
652 return source is not None and (rev is None or source.startswith(rev))
653
653
654 return subset.filter(lambda r: _matchvalue(r),
654 return subset.filter(lambda r: _matchvalue(r),
655 condrepr=('<converted %r>', rev))
655 condrepr=('<converted %r>', rev))
656
656
657 @predicate('date(interval)', safe=True, weight=10)
657 @predicate('date(interval)', safe=True, weight=10)
658 def date(repo, subset, x):
658 def date(repo, subset, x):
659 """Changesets within the interval, see :hg:`help dates`.
659 """Changesets within the interval, see :hg:`help dates`.
660 """
660 """
661 # i18n: "date" is a keyword
661 # i18n: "date" is a keyword
662 ds = getstring(x, _("date requires a string"))
662 ds = getstring(x, _("date requires a string"))
663 dm = util.matchdate(ds)
663 dm = util.matchdate(ds)
664 return subset.filter(lambda x: dm(repo[x].date()[0]),
664 return subset.filter(lambda x: dm(repo[x].date()[0]),
665 condrepr=('<date %r>', ds))
665 condrepr=('<date %r>', ds))
666
666
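# Editorial usage sketch (not part of the original source): interval strings
# follow :hg:`help dates`, e.g.
#   hg log -r "date('>2017-12-01')"              # on or after a given day
#   hg log -r "date('2017-11-01 to 2017-11-30')" # within a range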
667 @predicate('desc(string)', safe=True, weight=10)
667 @predicate('desc(string)', safe=True, weight=10)
668 def desc(repo, subset, x):
668 def desc(repo, subset, x):
669 """Search commit message for string. The match is case-insensitive.
669 """Search commit message for string. The match is case-insensitive.
670
670
671 Pattern matching is supported for `string`. See
671 Pattern matching is supported for `string`. See
672 :hg:`help revisions.patterns`.
672 :hg:`help revisions.patterns`.
673 """
673 """
674 # i18n: "desc" is a keyword
674 # i18n: "desc" is a keyword
675 ds = getstring(x, _("desc requires a string"))
675 ds = getstring(x, _("desc requires a string"))
676
676
677 kind, pattern, matcher = _substringmatcher(ds, casesensitive=False)
677 kind, pattern, matcher = _substringmatcher(ds, casesensitive=False)
678
678
679 return subset.filter(lambda r: matcher(repo[r].description()),
679 return subset.filter(lambda r: matcher(repo[r].description()),
680 condrepr=('<desc %r>', ds))
680 condrepr=('<desc %r>', ds))
681
681
682 def _descendants(repo, subset, x, followfirst=False, startdepth=None,
682 def _descendants(repo, subset, x, followfirst=False, startdepth=None,
683 stopdepth=None):
683 stopdepth=None):
684 roots = getset(repo, fullreposet(repo), x)
684 roots = getset(repo, fullreposet(repo), x)
685 if not roots:
685 if not roots:
686 return baseset()
686 return baseset()
687 s = dagop.revdescendants(repo, roots, followfirst, startdepth, stopdepth)
687 s = dagop.revdescendants(repo, roots, followfirst, startdepth, stopdepth)
688 return subset & s
688 return subset & s
689
689
690 @predicate('descendants(set[, depth])', safe=True)
690 @predicate('descendants(set[, depth])', safe=True)
691 def descendants(repo, subset, x):
691 def descendants(repo, subset, x):
692 """Changesets which are descendants of changesets in set, including the
692 """Changesets which are descendants of changesets in set, including the
693 given changesets themselves.
693 given changesets themselves.
694
694
695 If depth is specified, the result only includes changesets up to
695 If depth is specified, the result only includes changesets up to
696 the specified generation.
696 the specified generation.
697 """
697 """
698 # startdepth is for internal use only until we can decide the UI
698 # startdepth is for internal use only until we can decide the UI
699 args = getargsdict(x, 'descendants', 'set depth startdepth')
699 args = getargsdict(x, 'descendants', 'set depth startdepth')
700 if 'set' not in args:
700 if 'set' not in args:
701 # i18n: "descendants" is a keyword
701 # i18n: "descendants" is a keyword
702 raise error.ParseError(_('descendants takes at least 1 argument'))
702 raise error.ParseError(_('descendants takes at least 1 argument'))
703 startdepth = stopdepth = None
703 startdepth = stopdepth = None
704 if 'startdepth' in args:
704 if 'startdepth' in args:
705 n = getinteger(args['startdepth'],
705 n = getinteger(args['startdepth'],
706 "descendants expects an integer startdepth")
706 "descendants expects an integer startdepth")
707 if n < 0:
707 if n < 0:
708 raise error.ParseError("negative startdepth")
708 raise error.ParseError("negative startdepth")
709 startdepth = n
709 startdepth = n
710 if 'depth' in args:
710 if 'depth' in args:
711 # i18n: "descendants" is a keyword
711 # i18n: "descendants" is a keyword
712 n = getinteger(args['depth'], _("descendants expects an integer depth"))
712 n = getinteger(args['depth'], _("descendants expects an integer depth"))
713 if n < 0:
713 if n < 0:
714 raise error.ParseError(_("negative depth"))
714 raise error.ParseError(_("negative depth"))
715 stopdepth = n + 1
715 stopdepth = n + 1
716 return _descendants(repo, subset, args['set'],
716 return _descendants(repo, subset, args['set'],
717 startdepth=startdepth, stopdepth=stopdepth)
717 startdepth=startdepth, stopdepth=stopdepth)
718
718
719 @predicate('_firstdescendants', safe=True)
719 @predicate('_firstdescendants', safe=True)
720 def _firstdescendants(repo, subset, x):
720 def _firstdescendants(repo, subset, x):
721 # ``_firstdescendants(set)``
721 # ``_firstdescendants(set)``
722 # Like ``descendants(set)`` but follows only the first parents.
722 # Like ``descendants(set)`` but follows only the first parents.
723 return _descendants(repo, subset, x, followfirst=True)
723 return _descendants(repo, subset, x, followfirst=True)
724
724
725 @predicate('destination([set])', safe=True, weight=10)
725 @predicate('destination([set])', safe=True, weight=10)
726 def destination(repo, subset, x):
726 def destination(repo, subset, x):
727 """Changesets that were created by a graft, transplant or rebase operation,
727 """Changesets that were created by a graft, transplant or rebase operation,
728 with the given revisions specified as the source. Omitting the optional set
728 with the given revisions specified as the source. Omitting the optional set
729 is the same as passing all().
729 is the same as passing all().
730 """
730 """
731 if x is not None:
731 if x is not None:
732 sources = getset(repo, fullreposet(repo), x)
732 sources = getset(repo, fullreposet(repo), x)
733 else:
733 else:
734 sources = fullreposet(repo)
734 sources = fullreposet(repo)
735
735
736 dests = set()
736 dests = set()
737
737
738 # subset contains all of the possible destinations that can be returned, so
738 # subset contains all of the possible destinations that can be returned, so
739 # iterate over them and see if their source(s) were provided in the arg set.
739 # iterate over them and see if their source(s) were provided in the arg set.
740 # Even if the immediate src of r is not in the arg set, src's source (or
740 # Even if the immediate src of r is not in the arg set, src's source (or
741 # further back) may be. Scanning back further than the immediate src allows
741 # further back) may be. Scanning back further than the immediate src allows
742 # transitive transplants and rebases to yield the same results as transitive
742 # transitive transplants and rebases to yield the same results as transitive
743 # grafts.
743 # grafts.
744 for r in subset:
744 for r in subset:
745 src = _getrevsource(repo, r)
745 src = _getrevsource(repo, r)
746 lineage = None
746 lineage = None
747
747
748 while src is not None:
748 while src is not None:
749 if lineage is None:
749 if lineage is None:
750 lineage = list()
750 lineage = list()
751
751
752 lineage.append(r)
752 lineage.append(r)
753
753
754 # The visited lineage is a match if the current source is in the arg
754 # The visited lineage is a match if the current source is in the arg
755 # set. Since every candidate dest is visited by way of iterating
755 # set. Since every candidate dest is visited by way of iterating
756 # subset, any dests further back in the lineage will be tested by a
756 # subset, any dests further back in the lineage will be tested by a
757 # different iteration over subset. Likewise, if the src was already
757 # different iteration over subset. Likewise, if the src was already
758 # selected, the current lineage can be selected without going back
758 # selected, the current lineage can be selected without going back
759 # further.
759 # further.
760 if src in sources or src in dests:
760 if src in sources or src in dests:
761 dests.update(lineage)
761 dests.update(lineage)
762 break
762 break
763
763
764 r = src
764 r = src
765 src = _getrevsource(repo, r)
765 src = _getrevsource(repo, r)
766
766
767 return subset.filter(dests.__contains__,
767 return subset.filter(dests.__contains__,
768 condrepr=lambda: '<destination %r>' % sorted(dests))
768 condrepr=lambda: '<destination %r>' % sorted(dests))
769
769
770 @predicate('divergent()', safe=True)
770 @predicate('divergent()', safe=True)
771 def divergent(repo, subset, x):
771 def divergent(repo, subset, x):
772 msg = ("'divergent()' is deprecated, "
772 msg = ("'divergent()' is deprecated, "
773 "use 'contentdivergent()'")
773 "use 'contentdivergent()'")
774 repo.ui.deprecwarn(msg, '4.4')
774 repo.ui.deprecwarn(msg, '4.4')
775
775
776 return contentdivergent(repo, subset, x)
776 return contentdivergent(repo, subset, x)
777
777
778 @predicate('contentdivergent()', safe=True)
778 @predicate('contentdivergent()', safe=True)
779 def contentdivergent(repo, subset, x):
779 def contentdivergent(repo, subset, x):
780 """
780 """
781 Final successors of changesets with an alternative set of final
781 Final successors of changesets with an alternative set of final
782 successors. (EXPERIMENTAL)
782 successors. (EXPERIMENTAL)
783 """
783 """
784 # i18n: "contentdivergent" is a keyword
784 # i18n: "contentdivergent" is a keyword
785 getargs(x, 0, 0, _("contentdivergent takes no arguments"))
785 getargs(x, 0, 0, _("contentdivergent takes no arguments"))
786 contentdivergent = obsmod.getrevs(repo, 'contentdivergent')
786 contentdivergent = obsmod.getrevs(repo, 'contentdivergent')
787 return subset & contentdivergent
787 return subset & contentdivergent
788
788
789 @predicate('extdata(source)', safe=False, weight=100)
789 @predicate('extdata(source)', safe=False, weight=100)
790 def extdata(repo, subset, x):
790 def extdata(repo, subset, x):
791 """Changesets in the specified extdata source. (EXPERIMENTAL)"""
791 """Changesets in the specified extdata source. (EXPERIMENTAL)"""
792 # i18n: "extdata" is a keyword
792 # i18n: "extdata" is a keyword
793 args = getargsdict(x, 'extdata', 'source')
793 args = getargsdict(x, 'extdata', 'source')
794 source = getstring(args.get('source'),
794 source = getstring(args.get('source'),
795 # i18n: "extdata" is a keyword
795 # i18n: "extdata" is a keyword
796 _('extdata takes at least 1 string argument'))
796 _('extdata takes at least 1 string argument'))
797 data = scmutil.extdatasource(repo, source)
797 data = scmutil.extdatasource(repo, source)
798 return subset & baseset(data)
798 return subset & baseset(data)
799
799
800 @predicate('extinct()', safe=True)
800 @predicate('extinct()', safe=True)
801 def extinct(repo, subset, x):
801 def extinct(repo, subset, x):
802 """Obsolete changesets with obsolete descendants only.
802 """Obsolete changesets with obsolete descendants only.
803 """
803 """
804 # i18n: "extinct" is a keyword
804 # i18n: "extinct" is a keyword
805 getargs(x, 0, 0, _("extinct takes no arguments"))
805 getargs(x, 0, 0, _("extinct takes no arguments"))
806 extincts = obsmod.getrevs(repo, 'extinct')
806 extincts = obsmod.getrevs(repo, 'extinct')
807 return subset & extincts
807 return subset & extincts
808
808
809 @predicate('extra(label, [value])', safe=True)
809 @predicate('extra(label, [value])', safe=True)
810 def extra(repo, subset, x):
810 def extra(repo, subset, x):
811 """Changesets with the given label in the extra metadata, with the given
811 """Changesets with the given label in the extra metadata, with the given
812 optional value.
812 optional value.
813
813
814 Pattern matching is supported for `value`. See
814 Pattern matching is supported for `value`. See
815 :hg:`help revisions.patterns`.
815 :hg:`help revisions.patterns`.
816 """
816 """
817 args = getargsdict(x, 'extra', 'label value')
817 args = getargsdict(x, 'extra', 'label value')
818 if 'label' not in args:
818 if 'label' not in args:
819 # i18n: "extra" is a keyword
819 # i18n: "extra" is a keyword
820 raise error.ParseError(_('extra takes at least 1 argument'))
820 raise error.ParseError(_('extra takes at least 1 argument'))
821 # i18n: "extra" is a keyword
821 # i18n: "extra" is a keyword
822 label = getstring(args['label'], _('first argument to extra must be '
822 label = getstring(args['label'], _('first argument to extra must be '
823 'a string'))
823 'a string'))
824 value = None
824 value = None
825
825
826 if 'value' in args:
826 if 'value' in args:
827 # i18n: "extra" is a keyword
827 # i18n: "extra" is a keyword
828 value = getstring(args['value'], _('second argument to extra must be '
828 value = getstring(args['value'], _('second argument to extra must be '
829 'a string'))
829 'a string'))
830 kind, value, matcher = util.stringmatcher(value)
830 kind, value, matcher = util.stringmatcher(value)
831
831
832 def _matchvalue(r):
832 def _matchvalue(r):
833 extra = repo[r].extra()
833 extra = repo[r].extra()
834 return label in extra and (value is None or matcher(extra[label]))
834 return label in extra and (value is None or matcher(extra[label]))
835
835
836 return subset.filter(lambda r: _matchvalue(r),
836 return subset.filter(lambda r: _matchvalue(r),
837 condrepr=('<extra[%r] %r>', label, value))
837 condrepr=('<extra[%r] %r>', label, value))
838
838
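# Editorial usage sketch (not part of the original source):
#   hg log -r "extra('convert_revision')"             # any changeset carrying the key
#   hg log -r "extra('convert_revision', 're:^svn:')" # value matching a regex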
839 @predicate('filelog(pattern)', safe=True)
839 @predicate('filelog(pattern)', safe=True)
840 def filelog(repo, subset, x):
840 def filelog(repo, subset, x):
841 """Changesets connected to the specified filelog.
841 """Changesets connected to the specified filelog.
842
842
843 For performance reasons, visits only revisions mentioned in the file-level
843 For performance reasons, visits only revisions mentioned in the file-level
844 filelog, rather than filtering through all changesets (much faster, but
844 filelog, rather than filtering through all changesets (much faster, but
845 doesn't include deletes or duplicate changes). For a slower, more accurate
845 doesn't include deletes or duplicate changes). For a slower, more accurate
846 result, use ``file()``.
846 result, use ``file()``.
847
847
848 The pattern without explicit kind like ``glob:`` is expected to be
848 The pattern without explicit kind like ``glob:`` is expected to be
849 relative to the current directory and match against a file exactly
849 relative to the current directory and match against a file exactly
850 for efficiency.
850 for efficiency.
851
851
852 If some linkrev points to revisions filtered by the current repoview, we'll
852 If some linkrev points to revisions filtered by the current repoview, we'll
853 work around it to return a non-filtered value.
853 work around it to return a non-filtered value.
854 """
854 """
855
855
856 # i18n: "filelog" is a keyword
856 # i18n: "filelog" is a keyword
857 pat = getstring(x, _("filelog requires a pattern"))
857 pat = getstring(x, _("filelog requires a pattern"))
858 s = set()
858 s = set()
859 cl = repo.changelog
859 cl = repo.changelog
860
860
861 if not matchmod.patkind(pat):
861 if not matchmod.patkind(pat):
862 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
862 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
863 files = [f]
863 files = [f]
864 else:
864 else:
865 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
865 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
866 files = (f for f in repo[None] if m(f))
866 files = (f for f in repo[None] if m(f))
867
867
868 for f in files:
868 for f in files:
869 fl = repo.file(f)
869 fl = repo.file(f)
870 known = {}
870 known = {}
871 scanpos = 0
871 scanpos = 0
872 for fr in list(fl):
872 for fr in list(fl):
873 fn = fl.node(fr)
873 fn = fl.node(fr)
874 if fn in known:
874 if fn in known:
875 s.add(known[fn])
875 s.add(known[fn])
876 continue
876 continue
877
877
878 lr = fl.linkrev(fr)
878 lr = fl.linkrev(fr)
879 if lr in cl:
879 if lr in cl:
880 s.add(lr)
880 s.add(lr)
881 elif scanpos is not None:
881 elif scanpos is not None:
882 # lowest matching changeset is filtered, scan further
882 # lowest matching changeset is filtered, scan further
883 # ahead in changelog
883 # ahead in changelog
884 start = max(lr, scanpos) + 1
884 start = max(lr, scanpos) + 1
885 scanpos = None
885 scanpos = None
886 for r in cl.revs(start):
886 for r in cl.revs(start):
887 # minimize parsing of non-matching entries
887 # minimize parsing of non-matching entries
888 if f in cl.revision(r) and f in cl.readfiles(r):
888 if f in cl.revision(r) and f in cl.readfiles(r):
889 try:
889 try:
890 # try to use manifest delta fastpath
890 # try to use manifest delta fastpath
891 n = repo[r].filenode(f)
891 n = repo[r].filenode(f)
892 if n not in known:
892 if n not in known:
893 if n == fn:
893 if n == fn:
894 s.add(r)
894 s.add(r)
895 scanpos = r
895 scanpos = r
896 break
896 break
897 else:
897 else:
898 known[n] = r
898 known[n] = r
899 except error.ManifestLookupError:
899 except error.ManifestLookupError:
900 # deletion in changelog
900 # deletion in changelog
901 continue
901 continue
902
902
903 return subset & s
903 return subset & s
904
904
905 @predicate('first(set, [n])', safe=True, takeorder=True, weight=0)
905 @predicate('first(set, [n])', safe=True, takeorder=True, weight=0)
906 def first(repo, subset, x, order):
906 def first(repo, subset, x, order):
907 """An alias for limit().
907 """An alias for limit().
908 """
908 """
909 return limit(repo, subset, x, order)
909 return limit(repo, subset, x, order)
910
910
911 def _follow(repo, subset, x, name, followfirst=False):
911 def _follow(repo, subset, x, name, followfirst=False):
912 l = getargs(x, 0, 2, _("%s takes no arguments or a pattern "
912 l = getargs(x, 0, 2, _("%s takes no arguments or a pattern "
913 "and an optional revset") % name)
913 "and an optional revset") % name)
914 c = repo['.']
914 c = repo['.']
915 if l:
915 if l:
916 x = getstring(l[0], _("%s expected a pattern") % name)
916 x = getstring(l[0], _("%s expected a pattern") % name)
917 rev = None
917 rev = None
918 if len(l) >= 2:
918 if len(l) >= 2:
919 revs = getset(repo, fullreposet(repo), l[1])
919 revs = getset(repo, fullreposet(repo), l[1])
920 if len(revs) != 1:
920 if len(revs) != 1:
921 raise error.RepoLookupError(
921 raise error.RepoLookupError(
922 _("%s expected one starting revision") % name)
922 _("%s expected one starting revision") % name)
923 rev = revs.last()
923 rev = revs.last()
924 c = repo[rev]
924 c = repo[rev]
925 matcher = matchmod.match(repo.root, repo.getcwd(), [x],
925 matcher = matchmod.match(repo.root, repo.getcwd(), [x],
926 ctx=repo[rev], default='path')
926 ctx=repo[rev], default='path')
927
927
928 files = c.manifest().walk(matcher)
928 files = c.manifest().walk(matcher)
929
929
930 s = set()
930 fctxs = [c[f].introfilectx() for f in files]
931 for fname in files:
931 a = dagop.filectxancestors(fctxs, followfirst)
932 fctx = c[fname].introfilectx()
932 s = set(c.rev() for c in a)
933 a = dagop.filectxancestors(fctx, followfirst)
934 s = s.union(set(c.rev() for c in a))
935 else:
933 else:
936 s = dagop.revancestors(repo, baseset([c.rev()]), followfirst)
934 s = dagop.revancestors(repo, baseset([c.rev()]), followfirst)
937
935
938 return subset & s
936 return subset & s
939
937
940 @predicate('follow([pattern[, startrev]])', safe=True)
938 @predicate('follow([pattern[, startrev]])', safe=True)
941 def follow(repo, subset, x):
939 def follow(repo, subset, x):
942 """
940 """
943 An alias for ``::.`` (ancestors of the working directory's first parent).
941 An alias for ``::.`` (ancestors of the working directory's first parent).
944 If pattern is specified, the histories of files matching the given
942 If pattern is specified, the histories of files matching the given
945 pattern in the revision given by startrev are followed, including copies.
943 pattern in the revision given by startrev are followed, including copies.
946 """
944 """
947 return _follow(repo, subset, x, 'follow')
945 return _follow(repo, subset, x, 'follow')
948
946
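# Editorial usage sketch (not part of the original source): "follow(COPYING)"
# follows the history of a single file from the working directory parent,
# while a pattern such as "follow('glob:mercurial/*.py', tip)" walks the
# histories of every matching file starting at tip, as the code above does by
# collecting one filectx per matched file.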
949 @predicate('_followfirst', safe=True)
947 @predicate('_followfirst', safe=True)
950 def _followfirst(repo, subset, x):
948 def _followfirst(repo, subset, x):
951 # ``followfirst([pattern[, startrev]])``
949 # ``followfirst([pattern[, startrev]])``
952 # Like ``follow([pattern[, startrev]])`` but follows only the first parent
950 # Like ``follow([pattern[, startrev]])`` but follows only the first parent
953 # of every revision or file revision.
951 # of every revision or file revision.
954 return _follow(repo, subset, x, '_followfirst', followfirst=True)
952 return _follow(repo, subset, x, '_followfirst', followfirst=True)
955
953
956 @predicate('followlines(file, fromline:toline[, startrev=., descend=False])',
954 @predicate('followlines(file, fromline:toline[, startrev=., descend=False])',
957 safe=True)
955 safe=True)
958 def followlines(repo, subset, x):
956 def followlines(repo, subset, x):
959 """Changesets modifying `file` in line range ('fromline', 'toline').
957 """Changesets modifying `file` in line range ('fromline', 'toline').
960
958
961 Line range corresponds to 'file' content at 'startrev' and should hence be
959 Line range corresponds to 'file' content at 'startrev' and should hence be
962 consistent with file size. If startrev is not specified, working directory's
960 consistent with file size. If startrev is not specified, working directory's
963 parent is used.
961 parent is used.
964
962
965 By default, ancestors of 'startrev' are returned. If 'descend' is True,
963 By default, ancestors of 'startrev' are returned. If 'descend' is True,
966 descendants of 'startrev' are returned though renames are (currently) not
964 descendants of 'startrev' are returned though renames are (currently) not
967 followed in this direction.
965 followed in this direction.
968 """
966 """
969 args = getargsdict(x, 'followlines', 'file *lines startrev descend')
967 args = getargsdict(x, 'followlines', 'file *lines startrev descend')
970 if len(args['lines']) != 1:
968 if len(args['lines']) != 1:
971 raise error.ParseError(_("followlines requires a line range"))
969 raise error.ParseError(_("followlines requires a line range"))
972
970
973 rev = '.'
971 rev = '.'
974 if 'startrev' in args:
972 if 'startrev' in args:
975 revs = getset(repo, fullreposet(repo), args['startrev'])
973 revs = getset(repo, fullreposet(repo), args['startrev'])
976 if len(revs) != 1:
974 if len(revs) != 1:
977 raise error.ParseError(
975 raise error.ParseError(
978 # i18n: "followlines" is a keyword
976 # i18n: "followlines" is a keyword
979 _("followlines expects exactly one revision"))
977 _("followlines expects exactly one revision"))
980 rev = revs.last()
978 rev = revs.last()
981
979
982 pat = getstring(args['file'], _("followlines requires a pattern"))
980 pat = getstring(args['file'], _("followlines requires a pattern"))
983 # i18n: "followlines" is a keyword
981 # i18n: "followlines" is a keyword
984 msg = _("followlines expects exactly one file")
982 msg = _("followlines expects exactly one file")
985 fname = scmutil.parsefollowlinespattern(repo, rev, pat, msg)
983 fname = scmutil.parsefollowlinespattern(repo, rev, pat, msg)
986 # i18n: "followlines" is a keyword
984 # i18n: "followlines" is a keyword
987 lr = getrange(args['lines'][0], _("followlines expects a line range"))
985 lr = getrange(args['lines'][0], _("followlines expects a line range"))
988 fromline, toline = [getinteger(a, _("line range bounds must be integers"))
986 fromline, toline = [getinteger(a, _("line range bounds must be integers"))
989 for a in lr]
987 for a in lr]
990 fromline, toline = util.processlinerange(fromline, toline)
988 fromline, toline = util.processlinerange(fromline, toline)
991
989
992 fctx = repo[rev].filectx(fname)
990 fctx = repo[rev].filectx(fname)
993 descend = False
991 descend = False
994 if 'descend' in args:
992 if 'descend' in args:
995 descend = getboolean(args['descend'],
993 descend = getboolean(args['descend'],
996 # i18n: "descend" is a keyword
994 # i18n: "descend" is a keyword
997 _("descend argument must be a boolean"))
995 _("descend argument must be a boolean"))
998 if descend:
996 if descend:
999 rs = generatorset(
997 rs = generatorset(
1000 (c.rev() for c, _linerange
998 (c.rev() for c, _linerange
1001 in dagop.blockdescendants(fctx, fromline, toline)),
999 in dagop.blockdescendants(fctx, fromline, toline)),
1002 iterasc=True)
1000 iterasc=True)
1003 else:
1001 else:
1004 rs = generatorset(
1002 rs = generatorset(
1005 (c.rev() for c, _linerange
1003 (c.rev() for c, _linerange
1006 in dagop.blockancestors(fctx, fromline, toline)),
1004 in dagop.blockancestors(fctx, fromline, toline)),
1007 iterasc=False)
1005 iterasc=False)
1008 return subset & rs
1006 return subset & rs
1009
1007
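# Editorial usage sketch (not part of the original source):
#   hg log -r "followlines('mercurial/dagop.py', 10:20)"
# follows changesets touching lines 10..20 of that file from the working
# directory parent; adding "descend=True" walks towards descendants instead.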
1010 @predicate('all()', safe=True)
1008 @predicate('all()', safe=True)
1011 def getall(repo, subset, x):
1009 def getall(repo, subset, x):
1012 """All changesets, the same as ``0:tip``.
1010 """All changesets, the same as ``0:tip``.
1013 """
1011 """
1014 # i18n: "all" is a keyword
1012 # i18n: "all" is a keyword
1015 getargs(x, 0, 0, _("all takes no arguments"))
1013 getargs(x, 0, 0, _("all takes no arguments"))
1016 return subset & spanset(repo) # drop "null" if any
1014 return subset & spanset(repo) # drop "null" if any
1017
1015
1018 @predicate('grep(regex)', weight=10)
1016 @predicate('grep(regex)', weight=10)
1019 def grep(repo, subset, x):
1017 def grep(repo, subset, x):
1020 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1018 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1021 to ensure special escape characters are handled correctly. Unlike
1019 to ensure special escape characters are handled correctly. Unlike
1022 ``keyword(string)``, the match is case-sensitive.
1020 ``keyword(string)``, the match is case-sensitive.
1023 """
1021 """
1024 try:
1022 try:
1025 # i18n: "grep" is a keyword
1023 # i18n: "grep" is a keyword
1026 gr = re.compile(getstring(x, _("grep requires a string")))
1024 gr = re.compile(getstring(x, _("grep requires a string")))
1027 except re.error as e:
1025 except re.error as e:
1028 raise error.ParseError(_('invalid match pattern: %s') % e)
1026 raise error.ParseError(_('invalid match pattern: %s') % e)
1029
1027
1030 def matches(x):
1028 def matches(x):
1031 c = repo[x]
1029 c = repo[x]
1032 for e in c.files() + [c.user(), c.description()]:
1030 for e in c.files() + [c.user(), c.description()]:
1033 if gr.search(e):
1031 if gr.search(e):
1034 return True
1032 return True
1035 return False
1033 return False
1036
1034
1037 return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
1035 return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
1038
1036
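# Editorial usage sketch (not part of the original source): unlike keyword(),
# the match is a case-sensitive regular expression, e.g.
#   hg log -r "grep(r'issue\d+')"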
1039 @predicate('_matchfiles', safe=True)
1037 @predicate('_matchfiles', safe=True)
1040 def _matchfiles(repo, subset, x):
1038 def _matchfiles(repo, subset, x):
1041 # _matchfiles takes a revset list of prefixed arguments:
1039 # _matchfiles takes a revset list of prefixed arguments:
1042 #
1040 #
1043 # [p:foo, i:bar, x:baz]
1041 # [p:foo, i:bar, x:baz]
1044 #
1042 #
1045 # builds a match object from them and filters subset. Allowed
1043 # builds a match object from them and filters subset. Allowed
1046 # prefixes are 'p:' for regular patterns, 'i:' for include
1044 # prefixes are 'p:' for regular patterns, 'i:' for include
1047 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1045 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1048 # a revision identifier, or the empty string to reference the
1046 # a revision identifier, or the empty string to reference the
1049 # working directory, from which the match object is
1047 # working directory, from which the match object is
1050 # initialized. Use 'd:' to set the default matching mode, default
1048 # initialized. Use 'd:' to set the default matching mode, default
1051 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1049 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1052
1050
1053 l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
1051 l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
1054 pats, inc, exc = [], [], []
1052 pats, inc, exc = [], [], []
1055 rev, default = None, None
1053 rev, default = None, None
1056 for arg in l:
1054 for arg in l:
1057 s = getstring(arg, "_matchfiles requires string arguments")
1055 s = getstring(arg, "_matchfiles requires string arguments")
1058 prefix, value = s[:2], s[2:]
1056 prefix, value = s[:2], s[2:]
1059 if prefix == 'p:':
1057 if prefix == 'p:':
1060 pats.append(value)
1058 pats.append(value)
1061 elif prefix == 'i:':
1059 elif prefix == 'i:':
1062 inc.append(value)
1060 inc.append(value)
1063 elif prefix == 'x:':
1061 elif prefix == 'x:':
1064 exc.append(value)
1062 exc.append(value)
1065 elif prefix == 'r:':
1063 elif prefix == 'r:':
1066 if rev is not None:
1064 if rev is not None:
1067 raise error.ParseError('_matchfiles expected at most one '
1065 raise error.ParseError('_matchfiles expected at most one '
1068 'revision')
1066 'revision')
1069 if value != '': # empty means working directory; leave rev as None
1067 if value != '': # empty means working directory; leave rev as None
1070 rev = value
1068 rev = value
1071 elif prefix == 'd:':
1069 elif prefix == 'd:':
1072 if default is not None:
1070 if default is not None:
1073 raise error.ParseError('_matchfiles expected at most one '
1071 raise error.ParseError('_matchfiles expected at most one '
1074 'default mode')
1072 'default mode')
1075 default = value
1073 default = value
1076 else:
1074 else:
1077 raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
1075 raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
1078 if not default:
1076 if not default:
1079 default = 'glob'
1077 default = 'glob'
1080
1078
1081 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1079 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1082 exclude=exc, ctx=repo[rev], default=default)
1080 exclude=exc, ctx=repo[rev], default=default)
1083
1081
1084 # This directly reads the changelog data, as creating a changectx for all
1082 # This directly reads the changelog data, as creating a changectx for all
1085 # revisions is quite expensive.
1083 # revisions is quite expensive.
1086 getfiles = repo.changelog.readfiles
1084 getfiles = repo.changelog.readfiles
1087 wdirrev = node.wdirrev
1085 wdirrev = node.wdirrev
1088 def matches(x):
1086 def matches(x):
1089 if x == wdirrev:
1087 if x == wdirrev:
1090 files = repo[x].files()
1088 files = repo[x].files()
1091 else:
1089 else:
1092 files = getfiles(x)
1090 files = getfiles(x)
1093 for f in files:
1091 for f in files:
1094 if m(f):
1092 if m(f):
1095 return True
1093 return True
1096 return False
1094 return False
1097
1095
1098 return subset.filter(matches,
1096 return subset.filter(matches,
1099 condrepr=('<matchfiles patterns=%r, include=%r '
1097 condrepr=('<matchfiles patterns=%r, include=%r '
1100 'exclude=%r, default=%r, rev=%r>',
1098 'exclude=%r, default=%r, rev=%r>',
1101 pats, inc, exc, default, rev))
1099 pats, inc, exc, default, rev))
1102
1100
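# Editorial note (not part of the original source): file(pattern) below is the
# user-facing entry point; it expands (roughly) to a single 'p:' argument, so
# file('tests/**') behaves like _matchfiles('p:tests/**') with the default
# 'glob' matching mode set above.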
1103 @predicate('file(pattern)', safe=True, weight=10)
1101 @predicate('file(pattern)', safe=True, weight=10)
1104 def hasfile(repo, subset, x):
1102 def hasfile(repo, subset, x):
1105 """Changesets affecting files matched by pattern.
1103 """Changesets affecting files matched by pattern.
1106
1104
1107 For a faster but less accurate result, consider using ``filelog()``
1105 For a faster but less accurate result, consider using ``filelog()``
1108 instead.
1106 instead.
1109
1107
1110 This predicate uses ``glob:`` as the default kind of pattern.
1108 This predicate uses ``glob:`` as the default kind of pattern.
1111 """
1109 """
1112 # i18n: "file" is a keyword
1110 # i18n: "file" is a keyword
1113 pat = getstring(x, _("file requires a pattern"))
1111 pat = getstring(x, _("file requires a pattern"))
1114 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1112 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1115
1113
1116 @predicate('head()', safe=True)
1114 @predicate('head()', safe=True)
1117 def head(repo, subset, x):
1115 def head(repo, subset, x):
1118 """Changeset is a named branch head.
1116 """Changeset is a named branch head.
1119 """
1117 """
1120 # i18n: "head" is a keyword
1118 # i18n: "head" is a keyword
1121 getargs(x, 0, 0, _("head takes no arguments"))
1119 getargs(x, 0, 0, _("head takes no arguments"))
1122 hs = set()
1120 hs = set()
1123 cl = repo.changelog
1121 cl = repo.changelog
1124 for ls in repo.branchmap().itervalues():
1122 for ls in repo.branchmap().itervalues():
1125 hs.update(cl.rev(h) for h in ls)
1123 hs.update(cl.rev(h) for h in ls)
1126 return subset & baseset(hs)
1124 return subset & baseset(hs)
1127
1125
1128 @predicate('heads(set)', safe=True)
1126 @predicate('heads(set)', safe=True)
1129 def heads(repo, subset, x):
1127 def heads(repo, subset, x):
1130 """Members of set with no children in set.
1128 """Members of set with no children in set.
1131 """
1129 """
1132 s = getset(repo, subset, x)
1130 s = getset(repo, subset, x)
1133 ps = parents(repo, subset, x)
1131 ps = parents(repo, subset, x)
1134 return s - ps
1132 return s - ps
1135
1133
1136 @predicate('hidden()', safe=True)
1134 @predicate('hidden()', safe=True)
1137 def hidden(repo, subset, x):
1135 def hidden(repo, subset, x):
1138 """Hidden changesets.
1136 """Hidden changesets.
1139 """
1137 """
1140 # i18n: "hidden" is a keyword
1138 # i18n: "hidden" is a keyword
1141 getargs(x, 0, 0, _("hidden takes no arguments"))
1139 getargs(x, 0, 0, _("hidden takes no arguments"))
1142 hiddenrevs = repoview.filterrevs(repo, 'visible')
1140 hiddenrevs = repoview.filterrevs(repo, 'visible')
1143 return subset & hiddenrevs
1141 return subset & hiddenrevs
1144
1142
@predicate('keyword(string)', safe=True, weight=10)
def keyword(repo, subset, x):
    """Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.

    For a regular expression or case sensitive search of these fields, use
    ``grep(regex)``.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        c = repo[r]
        return any(kw in encoding.lower(t)
                   for t in c.files() + [c.user(), c.description()])

    return subset.filter(matches, condrepr=('<keyword %r>', kw))

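A rough companion sketch (same hypothetical setup) contrasting keyword() with grep():

from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui.load(), '.')
# Substring, case-insensitive, over description, user and changed file names.
bugfixes = repo.revs('keyword(%s)', 'bug')
# Regular expression, case-sensitive, when more precision is needed.
strict = repo.revs('grep(%s)', r'\bbug-\d+\b')
print(len(bugfixes))
print(len(strict))
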
1163 @predicate('limit(set[, n[, offset]])', safe=True, takeorder=True, weight=0)
1161 @predicate('limit(set[, n[, offset]])', safe=True, takeorder=True, weight=0)
1164 def limit(repo, subset, x, order):
1162 def limit(repo, subset, x, order):
1165 """First n members of set, defaulting to 1, starting from offset.
1163 """First n members of set, defaulting to 1, starting from offset.
1166 """
1164 """
1167 args = getargsdict(x, 'limit', 'set n offset')
1165 args = getargsdict(x, 'limit', 'set n offset')
1168 if 'set' not in args:
1166 if 'set' not in args:
1169 # i18n: "limit" is a keyword
1167 # i18n: "limit" is a keyword
1170 raise error.ParseError(_("limit requires one to three arguments"))
1168 raise error.ParseError(_("limit requires one to three arguments"))
1171 # i18n: "limit" is a keyword
1169 # i18n: "limit" is a keyword
1172 lim = getinteger(args.get('n'), _("limit expects a number"), default=1)
1170 lim = getinteger(args.get('n'), _("limit expects a number"), default=1)
1173 if lim < 0:
1171 if lim < 0:
1174 raise error.ParseError(_("negative number to select"))
1172 raise error.ParseError(_("negative number to select"))
1175 # i18n: "limit" is a keyword
1173 # i18n: "limit" is a keyword
1176 ofs = getinteger(args.get('offset'), _("limit expects a number"), default=0)
1174 ofs = getinteger(args.get('offset'), _("limit expects a number"), default=0)
1177 if ofs < 0:
1175 if ofs < 0:
1178 raise error.ParseError(_("negative offset"))
1176 raise error.ParseError(_("negative offset"))
1179 os = getset(repo, fullreposet(repo), args['set'])
1177 os = getset(repo, fullreposet(repo), args['set'])
1180 ls = os.slice(ofs, ofs + lim)
1178 ls = os.slice(ofs, ofs + lim)
1181 if order == followorder and lim > 1:
1179 if order == followorder and lim > 1:
1182 return subset & ls
1180 return subset & ls
1183 return ls & subset
1181 return ls & subset
1184
1182
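For illustration, a hedged sketch of paging through a revset with the n and offset arguments (repository path and page numbers are hypothetical):

from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui.load(), '.')
page = 2
pagesize = 10
# The third argument is the offset: skip the first 20 matching ancestors
# of the working-directory parent and take the next 10.
revs = repo.revs('limit(ancestors(.), %d, %d)', pagesize, page * pagesize)
print(list(revs))
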
1185 @predicate('last(set, [n])', safe=True, takeorder=True)
1183 @predicate('last(set, [n])', safe=True, takeorder=True)
1186 def last(repo, subset, x, order):
1184 def last(repo, subset, x, order):
1187 """Last n members of set, defaulting to 1.
1185 """Last n members of set, defaulting to 1.
1188 """
1186 """
1189 # i18n: "last" is a keyword
1187 # i18n: "last" is a keyword
1190 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1188 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1191 lim = 1
1189 lim = 1
1192 if len(l) == 2:
1190 if len(l) == 2:
1193 # i18n: "last" is a keyword
1191 # i18n: "last" is a keyword
1194 lim = getinteger(l[1], _("last expects a number"))
1192 lim = getinteger(l[1], _("last expects a number"))
1195 if lim < 0:
1193 if lim < 0:
1196 raise error.ParseError(_("negative number to select"))
1194 raise error.ParseError(_("negative number to select"))
1197 os = getset(repo, fullreposet(repo), l[0])
1195 os = getset(repo, fullreposet(repo), l[0])
1198 os.reverse()
1196 os.reverse()
1199 ls = os.slice(0, lim)
1197 ls = os.slice(0, lim)
1200 if order == followorder and lim > 1:
1198 if order == followorder and lim > 1:
1201 return subset & ls
1199 return subset & ls
1202 ls.reverse()
1200 ls.reverse()
1203 return ls & subset
1201 return ls & subset
1204
1202
1205 @predicate('max(set)', safe=True)
1203 @predicate('max(set)', safe=True)
1206 def maxrev(repo, subset, x):
1204 def maxrev(repo, subset, x):
1207 """Changeset with highest revision number in set.
1205 """Changeset with highest revision number in set.
1208 """
1206 """
1209 os = getset(repo, fullreposet(repo), x)
1207 os = getset(repo, fullreposet(repo), x)
1210 try:
1208 try:
1211 m = os.max()
1209 m = os.max()
1212 if m in subset:
1210 if m in subset:
1213 return baseset([m], datarepr=('<max %r, %r>', subset, os))
1211 return baseset([m], datarepr=('<max %r, %r>', subset, os))
1214 except ValueError:
1212 except ValueError:
1215 # os.max() throws a ValueError when the collection is empty.
1213 # os.max() throws a ValueError when the collection is empty.
1216 # Same as python's max().
1214 # Same as python's max().
1217 pass
1215 pass
1218 return baseset(datarepr=('<max %r, %r>', subset, os))
1216 return baseset(datarepr=('<max %r, %r>', subset, os))
1219
1217
1220 @predicate('merge()', safe=True)
1218 @predicate('merge()', safe=True)
1221 def merge(repo, subset, x):
1219 def merge(repo, subset, x):
1222 """Changeset is a merge changeset.
1220 """Changeset is a merge changeset.
1223 """
1221 """
1224 # i18n: "merge" is a keyword
1222 # i18n: "merge" is a keyword
1225 getargs(x, 0, 0, _("merge takes no arguments"))
1223 getargs(x, 0, 0, _("merge takes no arguments"))
1226 cl = repo.changelog
1224 cl = repo.changelog
1227 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
1225 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
1228 condrepr='<merge>')
1226 condrepr='<merge>')
1229
1227
1230 @predicate('branchpoint()', safe=True)
1228 @predicate('branchpoint()', safe=True)
1231 def branchpoint(repo, subset, x):
1229 def branchpoint(repo, subset, x):
1232 """Changesets with more than one child.
1230 """Changesets with more than one child.
1233 """
1231 """
1234 # i18n: "branchpoint" is a keyword
1232 # i18n: "branchpoint" is a keyword
1235 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1233 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1236 cl = repo.changelog
1234 cl = repo.changelog
1237 if not subset:
1235 if not subset:
1238 return baseset()
1236 return baseset()
1239 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1237 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1240 # (and if it is not, it should.)
1238 # (and if it is not, it should.)
1241 baserev = min(subset)
1239 baserev = min(subset)
1242 parentscount = [0]*(len(repo) - baserev)
1240 parentscount = [0]*(len(repo) - baserev)
1243 for r in cl.revs(start=baserev + 1):
1241 for r in cl.revs(start=baserev + 1):
1244 for p in cl.parentrevs(r):
1242 for p in cl.parentrevs(r):
1245 if p >= baserev:
1243 if p >= baserev:
1246 parentscount[p - baserev] += 1
1244 parentscount[p - baserev] += 1
1247 return subset.filter(lambda r: parentscount[r - baserev] > 1,
1245 return subset.filter(lambda r: parentscount[r - baserev] > 1,
1248 condrepr='<branchpoint>')
1246 condrepr='<branchpoint>')
1249
1247
1250 @predicate('min(set)', safe=True)
1248 @predicate('min(set)', safe=True)
1251 def minrev(repo, subset, x):
1249 def minrev(repo, subset, x):
1252 """Changeset with lowest revision number in set.
1250 """Changeset with lowest revision number in set.
1253 """
1251 """
1254 os = getset(repo, fullreposet(repo), x)
1252 os = getset(repo, fullreposet(repo), x)
1255 try:
1253 try:
1256 m = os.min()
1254 m = os.min()
1257 if m in subset:
1255 if m in subset:
1258 return baseset([m], datarepr=('<min %r, %r>', subset, os))
1256 return baseset([m], datarepr=('<min %r, %r>', subset, os))
1259 except ValueError:
1257 except ValueError:
1260 # os.min() throws a ValueError when the collection is empty.
1258 # os.min() throws a ValueError when the collection is empty.
1261 # Same as python's min().
1259 # Same as python's min().
1262 pass
1260 pass
1263 return baseset(datarepr=('<min %r, %r>', subset, os))
1261 return baseset(datarepr=('<min %r, %r>', subset, os))
1264
1262
1265 @predicate('modifies(pattern)', safe=True, weight=30)
1263 @predicate('modifies(pattern)', safe=True, weight=30)
1266 def modifies(repo, subset, x):
1264 def modifies(repo, subset, x):
1267 """Changesets modifying files matched by pattern.
1265 """Changesets modifying files matched by pattern.
1268
1266
1269 The pattern without explicit kind like ``glob:`` is expected to be
1267 The pattern without explicit kind like ``glob:`` is expected to be
1270 relative to the current directory and match against a file or a
1268 relative to the current directory and match against a file or a
1271 directory.
1269 directory.
1272 """
1270 """
1273 # i18n: "modifies" is a keyword
1271 # i18n: "modifies" is a keyword
1274 pat = getstring(x, _("modifies requires a pattern"))
1272 pat = getstring(x, _("modifies requires a pattern"))
1275 return checkstatus(repo, subset, pat, 0)
1273 return checkstatus(repo, subset, pat, 0)
1276
1274
1277 @predicate('named(namespace)')
1275 @predicate('named(namespace)')
1278 def named(repo, subset, x):
1276 def named(repo, subset, x):
1279 """The changesets in a given namespace.
1277 """The changesets in a given namespace.
1280
1278
1281 Pattern matching is supported for `namespace`. See
1279 Pattern matching is supported for `namespace`. See
1282 :hg:`help revisions.patterns`.
1280 :hg:`help revisions.patterns`.
1283 """
1281 """
1284 # i18n: "named" is a keyword
1282 # i18n: "named" is a keyword
1285 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1283 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1286
1284
1287 ns = getstring(args[0],
1285 ns = getstring(args[0],
1288 # i18n: "named" is a keyword
1286 # i18n: "named" is a keyword
1289 _('the argument to named must be a string'))
1287 _('the argument to named must be a string'))
1290 kind, pattern, matcher = util.stringmatcher(ns)
1288 kind, pattern, matcher = util.stringmatcher(ns)
1291 namespaces = set()
1289 namespaces = set()
1292 if kind == 'literal':
1290 if kind == 'literal':
1293 if pattern not in repo.names:
1291 if pattern not in repo.names:
1294 raise error.RepoLookupError(_("namespace '%s' does not exist")
1292 raise error.RepoLookupError(_("namespace '%s' does not exist")
1295 % ns)
1293 % ns)
1296 namespaces.add(repo.names[pattern])
1294 namespaces.add(repo.names[pattern])
1297 else:
1295 else:
1298 for name, ns in repo.names.iteritems():
1296 for name, ns in repo.names.iteritems():
1299 if matcher(name):
1297 if matcher(name):
1300 namespaces.add(ns)
1298 namespaces.add(ns)
1301 if not namespaces:
1299 if not namespaces:
1302 raise error.RepoLookupError(_("no namespace exists"
1300 raise error.RepoLookupError(_("no namespace exists"
1303 " that matches '%s'") % pattern)
1301 " that matches '%s'") % pattern)
1304
1302
1305 names = set()
1303 names = set()
1306 for ns in namespaces:
1304 for ns in namespaces:
1307 for name in ns.listnames(repo):
1305 for name in ns.listnames(repo):
1308 if name not in ns.deprecated:
1306 if name not in ns.deprecated:
1309 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1307 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1310
1308
1311 names -= {node.nullrev}
1309 names -= {node.nullrev}
1312 return subset & names
1310 return subset & names
1313
1311
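As an illustrative sketch (hypothetical setup), both the literal and the 're:' pattern forms can be used for the namespace argument:

from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui.load(), '.')
# Literal namespace name: every revision carrying a bookmark.
bookmarked = repo.revs('named(%s)', 'bookmarks')
# Pattern over namespace names, e.g. the built-in "tags" namespace and any
# extension-provided namespace whose name also ends in "tags".
tagged = repo.revs('named(%s)', 're:tags$')
print(len(bookmarked))
print(len(tagged))
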
1314 @predicate('id(string)', safe=True)
1312 @predicate('id(string)', safe=True)
1315 def node_(repo, subset, x):
1313 def node_(repo, subset, x):
1316 """Revision non-ambiguously specified by the given hex string prefix.
1314 """Revision non-ambiguously specified by the given hex string prefix.
1317 """
1315 """
1318 # i18n: "id" is a keyword
1316 # i18n: "id" is a keyword
1319 l = getargs(x, 1, 1, _("id requires one argument"))
1317 l = getargs(x, 1, 1, _("id requires one argument"))
1320 # i18n: "id" is a keyword
1318 # i18n: "id" is a keyword
1321 n = getstring(l[0], _("id requires a string"))
1319 n = getstring(l[0], _("id requires a string"))
1322 if len(n) == 40:
1320 if len(n) == 40:
1323 try:
1321 try:
1324 rn = repo.changelog.rev(node.bin(n))
1322 rn = repo.changelog.rev(node.bin(n))
1325 except error.WdirUnsupported:
1323 except error.WdirUnsupported:
1326 rn = node.wdirrev
1324 rn = node.wdirrev
1327 except (LookupError, TypeError):
1325 except (LookupError, TypeError):
1328 rn = None
1326 rn = None
1329 else:
1327 else:
1330 rn = None
1328 rn = None
1331 try:
1329 try:
1332 pm = repo.changelog._partialmatch(n)
1330 pm = repo.changelog._partialmatch(n)
1333 if pm is not None:
1331 if pm is not None:
1334 rn = repo.changelog.rev(pm)
1332 rn = repo.changelog.rev(pm)
1335 except error.WdirUnsupported:
1333 except error.WdirUnsupported:
1336 rn = node.wdirrev
1334 rn = node.wdirrev
1337
1335
1338 if rn is None:
1336 if rn is None:
1339 return baseset()
1337 return baseset()
1340 result = baseset([rn])
1338 result = baseset([rn])
1341 return result & subset
1339 return result & subset
1342
1340
1343 @predicate('obsolete()', safe=True)
1341 @predicate('obsolete()', safe=True)
1344 def obsolete(repo, subset, x):
1342 def obsolete(repo, subset, x):
1345 """Mutable changeset with a newer version."""
1343 """Mutable changeset with a newer version."""
1346 # i18n: "obsolete" is a keyword
1344 # i18n: "obsolete" is a keyword
1347 getargs(x, 0, 0, _("obsolete takes no arguments"))
1345 getargs(x, 0, 0, _("obsolete takes no arguments"))
1348 obsoletes = obsmod.getrevs(repo, 'obsolete')
1346 obsoletes = obsmod.getrevs(repo, 'obsolete')
1349 return subset & obsoletes
1347 return subset & obsoletes
1350
1348
@predicate('only(set, [set])', safe=True)
def only(repo, subset, x):
    """Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        descendants = set(dagop.revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if rev not in descendants and rev not in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & results

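A sketch of the ::<set1> - ::<set2> equivalence stated above; the branch names 'feature' and 'default' are hypothetical:

from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui.load(), '.')
a = repo.revs('only(%s, %s)', 'feature', 'default')
# ancestors(x) is ::x, so this spells out the same difference by hand and
# would normally select the same revisions.
b = repo.revs('ancestors(%s) - ancestors(%s)', 'feature', 'default')
print(sorted(a) == sorted(b))
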
1377 @predicate('origin([set])', safe=True)
1375 @predicate('origin([set])', safe=True)
1378 def origin(repo, subset, x):
1376 def origin(repo, subset, x):
1379 """
1377 """
1380 Changesets that were specified as a source for the grafts, transplants or
1378 Changesets that were specified as a source for the grafts, transplants or
1381 rebases that created the given revisions. Omitting the optional set is the
1379 rebases that created the given revisions. Omitting the optional set is the
1382 same as passing all(). If a changeset created by these operations is itself
1380 same as passing all(). If a changeset created by these operations is itself
1383 specified as a source for one of these operations, only the source changeset
1381 specified as a source for one of these operations, only the source changeset
1384 for the first operation is selected.
1382 for the first operation is selected.
1385 """
1383 """
1386 if x is not None:
1384 if x is not None:
1387 dests = getset(repo, fullreposet(repo), x)
1385 dests = getset(repo, fullreposet(repo), x)
1388 else:
1386 else:
1389 dests = fullreposet(repo)
1387 dests = fullreposet(repo)
1390
1388
1391 def _firstsrc(rev):
1389 def _firstsrc(rev):
1392 src = _getrevsource(repo, rev)
1390 src = _getrevsource(repo, rev)
1393 if src is None:
1391 if src is None:
1394 return None
1392 return None
1395
1393
1396 while True:
1394 while True:
1397 prev = _getrevsource(repo, src)
1395 prev = _getrevsource(repo, src)
1398
1396
1399 if prev is None:
1397 if prev is None:
1400 return src
1398 return src
1401 src = prev
1399 src = prev
1402
1400
1403 o = {_firstsrc(r) for r in dests}
1401 o = {_firstsrc(r) for r in dests}
1404 o -= {None}
1402 o -= {None}
1405 # XXX we should turn this into a baseset instead of a set, smartset may do
1403 # XXX we should turn this into a baseset instead of a set, smartset may do
1406 # some optimizations from the fact this is a baseset.
1404 # some optimizations from the fact this is a baseset.
1407 return subset & o
1405 return subset & o
1408
1406
1409 @predicate('outgoing([path])', safe=False, weight=10)
1407 @predicate('outgoing([path])', safe=False, weight=10)
1410 def outgoing(repo, subset, x):
1408 def outgoing(repo, subset, x):
1411 """Changesets not found in the specified destination repository, or the
1409 """Changesets not found in the specified destination repository, or the
1412 default push location.
1410 default push location.
1413 """
1411 """
1414 # Avoid cycles.
1412 # Avoid cycles.
1415 from . import (
1413 from . import (
1416 discovery,
1414 discovery,
1417 hg,
1415 hg,
1418 )
1416 )
1419 # i18n: "outgoing" is a keyword
1417 # i18n: "outgoing" is a keyword
1420 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1418 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1421 # i18n: "outgoing" is a keyword
1419 # i18n: "outgoing" is a keyword
1422 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1420 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1423 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1421 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1424 dest, branches = hg.parseurl(dest)
1422 dest, branches = hg.parseurl(dest)
1425 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1423 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1426 if revs:
1424 if revs:
1427 revs = [repo.lookup(rev) for rev in revs]
1425 revs = [repo.lookup(rev) for rev in revs]
1428 other = hg.peer(repo, {}, dest)
1426 other = hg.peer(repo, {}, dest)
1429 repo.ui.pushbuffer()
1427 repo.ui.pushbuffer()
1430 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1428 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1431 repo.ui.popbuffer()
1429 repo.ui.popbuffer()
1432 cl = repo.changelog
1430 cl = repo.changelog
1433 o = {cl.rev(r) for r in outgoing.missing}
1431 o = {cl.rev(r) for r in outgoing.missing}
1434 return subset & o
1432 return subset & o
1435
1433
1436 @predicate('p1([set])', safe=True)
1434 @predicate('p1([set])', safe=True)
1437 def p1(repo, subset, x):
1435 def p1(repo, subset, x):
1438 """First parent of changesets in set, or the working directory.
1436 """First parent of changesets in set, or the working directory.
1439 """
1437 """
1440 if x is None:
1438 if x is None:
1441 p = repo[x].p1().rev()
1439 p = repo[x].p1().rev()
1442 if p >= 0:
1440 if p >= 0:
1443 return subset & baseset([p])
1441 return subset & baseset([p])
1444 return baseset()
1442 return baseset()
1445
1443
1446 ps = set()
1444 ps = set()
1447 cl = repo.changelog
1445 cl = repo.changelog
1448 for r in getset(repo, fullreposet(repo), x):
1446 for r in getset(repo, fullreposet(repo), x):
1449 try:
1447 try:
1450 ps.add(cl.parentrevs(r)[0])
1448 ps.add(cl.parentrevs(r)[0])
1451 except error.WdirUnsupported:
1449 except error.WdirUnsupported:
1452 ps.add(repo[r].parents()[0].rev())
1450 ps.add(repo[r].parents()[0].rev())
1453 ps -= {node.nullrev}
1451 ps -= {node.nullrev}
1454 # XXX we should turn this into a baseset instead of a set, smartset may do
1452 # XXX we should turn this into a baseset instead of a set, smartset may do
1455 # some optimizations from the fact this is a baseset.
1453 # some optimizations from the fact this is a baseset.
1456 return subset & ps
1454 return subset & ps
1457
1455
1458 @predicate('p2([set])', safe=True)
1456 @predicate('p2([set])', safe=True)
1459 def p2(repo, subset, x):
1457 def p2(repo, subset, x):
1460 """Second parent of changesets in set, or the working directory.
1458 """Second parent of changesets in set, or the working directory.
1461 """
1459 """
1462 if x is None:
1460 if x is None:
1463 ps = repo[x].parents()
1461 ps = repo[x].parents()
1464 try:
1462 try:
1465 p = ps[1].rev()
1463 p = ps[1].rev()
1466 if p >= 0:
1464 if p >= 0:
1467 return subset & baseset([p])
1465 return subset & baseset([p])
1468 return baseset()
1466 return baseset()
1469 except IndexError:
1467 except IndexError:
1470 return baseset()
1468 return baseset()
1471
1469
1472 ps = set()
1470 ps = set()
1473 cl = repo.changelog
1471 cl = repo.changelog
1474 for r in getset(repo, fullreposet(repo), x):
1472 for r in getset(repo, fullreposet(repo), x):
1475 try:
1473 try:
1476 ps.add(cl.parentrevs(r)[1])
1474 ps.add(cl.parentrevs(r)[1])
1477 except error.WdirUnsupported:
1475 except error.WdirUnsupported:
1478 parents = repo[r].parents()
1476 parents = repo[r].parents()
1479 if len(parents) == 2:
1477 if len(parents) == 2:
1480 ps.add(parents[1].rev())
1478 ps.add(parents[1].rev())
1481 ps -= {node.nullrev}
1479 ps -= {node.nullrev}
1482 # XXX we should turn this into a baseset instead of a set, smartset may do
1480 # XXX we should turn this into a baseset instead of a set, smartset may do
1483 # some optimizations from the fact this is a baseset.
1481 # some optimizations from the fact this is a baseset.
1484 return subset & ps
1482 return subset & ps
1485
1483
1486 def parentpost(repo, subset, x, order):
1484 def parentpost(repo, subset, x, order):
1487 return p1(repo, subset, x)
1485 return p1(repo, subset, x)
1488
1486
1489 @predicate('parents([set])', safe=True)
1487 @predicate('parents([set])', safe=True)
1490 def parents(repo, subset, x):
1488 def parents(repo, subset, x):
1491 """
1489 """
1492 The set of all parents for all changesets in set, or the working directory.
1490 The set of all parents for all changesets in set, or the working directory.
1493 """
1491 """
1494 if x is None:
1492 if x is None:
1495 ps = set(p.rev() for p in repo[x].parents())
1493 ps = set(p.rev() for p in repo[x].parents())
1496 else:
1494 else:
1497 ps = set()
1495 ps = set()
1498 cl = repo.changelog
1496 cl = repo.changelog
1499 up = ps.update
1497 up = ps.update
1500 parentrevs = cl.parentrevs
1498 parentrevs = cl.parentrevs
1501 for r in getset(repo, fullreposet(repo), x):
1499 for r in getset(repo, fullreposet(repo), x):
1502 try:
1500 try:
1503 up(parentrevs(r))
1501 up(parentrevs(r))
1504 except error.WdirUnsupported:
1502 except error.WdirUnsupported:
1505 up(p.rev() for p in repo[r].parents())
1503 up(p.rev() for p in repo[r].parents())
1506 ps -= {node.nullrev}
1504 ps -= {node.nullrev}
1507 return subset & ps
1505 return subset & ps
1508
1506
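A small hedged sketch contrasting parents() with p1() and p2() (hypothetical setup; assumes the repository contains at least one merge):

from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui.load(), '.')
merges = repo.revs('merge()')
if merges:
    m = merges.first()
    # For a merge revision, parents() is the union of p1() and p2().
    print(sorted(repo.revs('parents(%d)', m)))
    print(sorted(repo.revs('p1(%d) + p2(%d)', m, m)))
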
1509 def _phase(repo, subset, *targets):
1507 def _phase(repo, subset, *targets):
1510 """helper to select all rev in <targets> phases"""
1508 """helper to select all rev in <targets> phases"""
1511 s = repo._phasecache.getrevset(repo, targets)
1509 s = repo._phasecache.getrevset(repo, targets)
1512 return subset & s
1510 return subset & s
1513
1511
1514 @predicate('draft()', safe=True)
1512 @predicate('draft()', safe=True)
1515 def draft(repo, subset, x):
1513 def draft(repo, subset, x):
1516 """Changeset in draft phase."""
1514 """Changeset in draft phase."""
1517 # i18n: "draft" is a keyword
1515 # i18n: "draft" is a keyword
1518 getargs(x, 0, 0, _("draft takes no arguments"))
1516 getargs(x, 0, 0, _("draft takes no arguments"))
1519 target = phases.draft
1517 target = phases.draft
1520 return _phase(repo, subset, target)
1518 return _phase(repo, subset, target)
1521
1519
1522 @predicate('secret()', safe=True)
1520 @predicate('secret()', safe=True)
1523 def secret(repo, subset, x):
1521 def secret(repo, subset, x):
1524 """Changeset in secret phase."""
1522 """Changeset in secret phase."""
1525 # i18n: "secret" is a keyword
1523 # i18n: "secret" is a keyword
1526 getargs(x, 0, 0, _("secret takes no arguments"))
1524 getargs(x, 0, 0, _("secret takes no arguments"))
1527 target = phases.secret
1525 target = phases.secret
1528 return _phase(repo, subset, target)
1526 return _phase(repo, subset, target)
1529
1527
1530 def parentspec(repo, subset, x, n, order):
1528 def parentspec(repo, subset, x, n, order):
1531 """``set^0``
1529 """``set^0``
1532 The set.
1530 The set.
1533 ``set^1`` (or ``set^``), ``set^2``
1531 ``set^1`` (or ``set^``), ``set^2``
1534 First or second parent, respectively, of all changesets in set.
1532 First or second parent, respectively, of all changesets in set.
1535 """
1533 """
1536 try:
1534 try:
1537 n = int(n[1])
1535 n = int(n[1])
1538 if n not in (0, 1, 2):
1536 if n not in (0, 1, 2):
1539 raise ValueError
1537 raise ValueError
1540 except (TypeError, ValueError):
1538 except (TypeError, ValueError):
1541 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1539 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1542 ps = set()
1540 ps = set()
1543 cl = repo.changelog
1541 cl = repo.changelog
1544 for r in getset(repo, fullreposet(repo), x):
1542 for r in getset(repo, fullreposet(repo), x):
1545 if n == 0:
1543 if n == 0:
1546 ps.add(r)
1544 ps.add(r)
1547 elif n == 1:
1545 elif n == 1:
1548 try:
1546 try:
1549 ps.add(cl.parentrevs(r)[0])
1547 ps.add(cl.parentrevs(r)[0])
1550 except error.WdirUnsupported:
1548 except error.WdirUnsupported:
1551 ps.add(repo[r].parents()[0].rev())
1549 ps.add(repo[r].parents()[0].rev())
1552 else:
1550 else:
1553 try:
1551 try:
1554 parents = cl.parentrevs(r)
1552 parents = cl.parentrevs(r)
1555 if parents[1] != node.nullrev:
1553 if parents[1] != node.nullrev:
1556 ps.add(parents[1])
1554 ps.add(parents[1])
1557 except error.WdirUnsupported:
1555 except error.WdirUnsupported:
1558 parents = repo[r].parents()
1556 parents = repo[r].parents()
1559 if len(parents) == 2:
1557 if len(parents) == 2:
1560 ps.add(parents[1].rev())
1558 ps.add(parents[1].rev())
1561 return subset & ps
1559 return subset & ps
1562
1560
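For illustration (hypothetical setup), the caret suffix handled here is simply a parent selector:

from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui.load(), '.')
# 'x^' and 'x^1' select the first parent, 'x^2' the second parent
# (empty for non-merges), and 'x^0' is x itself.
print(list(repo.revs('.^')))
print(list(repo.revs('.^2')))
print(list(repo.revs('.^0')))
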
@predicate('present(set)', safe=True, takeorder=True)
def present(repo, subset, x, order):
    """An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of the specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x, order)
    except error.RepoLookupError:
        return baseset()

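A sketch of why present() is useful in scripts: it turns a lookup failure into an empty result instead of an abort (the bookmark name is deliberately nonexistent):

from mercurial import error, hg, ui as uimod

repo = hg.repository(uimod.ui.load(), '.')
try:
    repo.revs('%s', 'nosuchbookmark')
except error.RepoLookupError:
    print('a plain lookup of a missing name aborts the whole query')
# Wrapped in present(), the same lookup degrades to an empty result.
print(list(repo.revs('present(%s)', 'nosuchbookmark')))
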
1577 # for internal use
1575 # for internal use
1578 @predicate('_notpublic', safe=True)
1576 @predicate('_notpublic', safe=True)
1579 def _notpublic(repo, subset, x):
1577 def _notpublic(repo, subset, x):
1580 getargs(x, 0, 0, "_notpublic takes no arguments")
1578 getargs(x, 0, 0, "_notpublic takes no arguments")
1581 return _phase(repo, subset, phases.draft, phases.secret)
1579 return _phase(repo, subset, phases.draft, phases.secret)
1582
1580
1583 # for internal use
1581 # for internal use
1584 @predicate('_phaseandancestors(phasename, set)', safe=True)
1582 @predicate('_phaseandancestors(phasename, set)', safe=True)
1585 def _phaseandancestors(repo, subset, x):
1583 def _phaseandancestors(repo, subset, x):
1586 # equivalent to (phasename() & ancestors(set)) but more efficient
1584 # equivalent to (phasename() & ancestors(set)) but more efficient
1587 # phasename could be one of 'draft', 'secret', or '_notpublic'
1585 # phasename could be one of 'draft', 'secret', or '_notpublic'
1588 args = getargs(x, 2, 2, "_phaseandancestors requires two arguments")
1586 args = getargs(x, 2, 2, "_phaseandancestors requires two arguments")
1589 phasename = getsymbol(args[0])
1587 phasename = getsymbol(args[0])
1590 s = getset(repo, fullreposet(repo), args[1])
1588 s = getset(repo, fullreposet(repo), args[1])
1591
1589
1592 draft = phases.draft
1590 draft = phases.draft
1593 secret = phases.secret
1591 secret = phases.secret
1594 phasenamemap = {
1592 phasenamemap = {
1595 '_notpublic': draft,
1593 '_notpublic': draft,
1596 'draft': draft, # follow secret's ancestors
1594 'draft': draft, # follow secret's ancestors
1597 'secret': secret,
1595 'secret': secret,
1598 }
1596 }
1599 if phasename not in phasenamemap:
1597 if phasename not in phasenamemap:
1600 raise error.ParseError('%r is not a valid phasename' % phasename)
1598 raise error.ParseError('%r is not a valid phasename' % phasename)
1601
1599
1602 minimalphase = phasenamemap[phasename]
1600 minimalphase = phasenamemap[phasename]
1603 getphase = repo._phasecache.phase
1601 getphase = repo._phasecache.phase
1604
1602
1605 def cutfunc(rev):
1603 def cutfunc(rev):
1606 return getphase(repo, rev) < minimalphase
1604 return getphase(repo, rev) < minimalphase
1607
1605
1608 revs = dagop.revancestors(repo, s, cutfunc=cutfunc)
1606 revs = dagop.revancestors(repo, s, cutfunc=cutfunc)
1609
1607
1610 if phasename == 'draft': # need to remove secret changesets
1608 if phasename == 'draft': # need to remove secret changesets
1611 revs = revs.filter(lambda r: getphase(repo, r) == draft)
1609 revs = revs.filter(lambda r: getphase(repo, r) == draft)
1612 return subset & revs
1610 return subset & revs
1613
1611
1614 @predicate('public()', safe=True)
1612 @predicate('public()', safe=True)
1615 def public(repo, subset, x):
1613 def public(repo, subset, x):
1616 """Changeset in public phase."""
1614 """Changeset in public phase."""
1617 # i18n: "public" is a keyword
1615 # i18n: "public" is a keyword
1618 getargs(x, 0, 0, _("public takes no arguments"))
1616 getargs(x, 0, 0, _("public takes no arguments"))
1619 phase = repo._phasecache.phase
1617 phase = repo._phasecache.phase
1620 target = phases.public
1618 target = phases.public
1621 condition = lambda r: phase(repo, r) == target
1619 condition = lambda r: phase(repo, r) == target
1622 return subset.filter(condition, condrepr=('<phase %r>', target),
1620 return subset.filter(condition, condrepr=('<phase %r>', target),
1623 cache=False)
1621 cache=False)
1624
1622
1625 @predicate('remote([id [,path]])', safe=False)
1623 @predicate('remote([id [,path]])', safe=False)
1626 def remote(repo, subset, x):
1624 def remote(repo, subset, x):
1627 """Local revision that corresponds to the given identifier in a
1625 """Local revision that corresponds to the given identifier in a
1628 remote repository, if present. Here, the '.' identifier is a
1626 remote repository, if present. Here, the '.' identifier is a
1629 synonym for the current local branch.
1627 synonym for the current local branch.
1630 """
1628 """
1631
1629
1632 from . import hg # avoid start-up nasties
1630 from . import hg # avoid start-up nasties
1633 # i18n: "remote" is a keyword
1631 # i18n: "remote" is a keyword
1634 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1632 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1635
1633
1636 q = '.'
1634 q = '.'
1637 if len(l) > 0:
1635 if len(l) > 0:
1638 # i18n: "remote" is a keyword
1636 # i18n: "remote" is a keyword
1639 q = getstring(l[0], _("remote requires a string id"))
1637 q = getstring(l[0], _("remote requires a string id"))
1640 if q == '.':
1638 if q == '.':
1641 q = repo['.'].branch()
1639 q = repo['.'].branch()
1642
1640
1643 dest = ''
1641 dest = ''
1644 if len(l) > 1:
1642 if len(l) > 1:
1645 # i18n: "remote" is a keyword
1643 # i18n: "remote" is a keyword
1646 dest = getstring(l[1], _("remote requires a repository path"))
1644 dest = getstring(l[1], _("remote requires a repository path"))
1647 dest = repo.ui.expandpath(dest or 'default')
1645 dest = repo.ui.expandpath(dest or 'default')
1648 dest, branches = hg.parseurl(dest)
1646 dest, branches = hg.parseurl(dest)
1649 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1647 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1650 if revs:
1648 if revs:
1651 revs = [repo.lookup(rev) for rev in revs]
1649 revs = [repo.lookup(rev) for rev in revs]
1652 other = hg.peer(repo, {}, dest)
1650 other = hg.peer(repo, {}, dest)
1653 n = other.lookup(q)
1651 n = other.lookup(q)
1654 if n in repo:
1652 if n in repo:
1655 r = repo[n].rev()
1653 r = repo[n].rev()
1656 if r in subset:
1654 if r in subset:
1657 return baseset([r])
1655 return baseset([r])
1658 return baseset()
1656 return baseset()
1659
1657
1660 @predicate('removes(pattern)', safe=True, weight=30)
1658 @predicate('removes(pattern)', safe=True, weight=30)
1661 def removes(repo, subset, x):
1659 def removes(repo, subset, x):
1662 """Changesets which remove files matching pattern.
1660 """Changesets which remove files matching pattern.
1663
1661
1664 The pattern without explicit kind like ``glob:`` is expected to be
1662 The pattern without explicit kind like ``glob:`` is expected to be
1665 relative to the current directory and match against a file or a
1663 relative to the current directory and match against a file or a
1666 directory.
1664 directory.
1667 """
1665 """
1668 # i18n: "removes" is a keyword
1666 # i18n: "removes" is a keyword
1669 pat = getstring(x, _("removes requires a pattern"))
1667 pat = getstring(x, _("removes requires a pattern"))
1670 return checkstatus(repo, subset, pat, 2)
1668 return checkstatus(repo, subset, pat, 2)
1671
1669
1672 @predicate('rev(number)', safe=True)
1670 @predicate('rev(number)', safe=True)
1673 def rev(repo, subset, x):
1671 def rev(repo, subset, x):
1674 """Revision with the given numeric identifier.
1672 """Revision with the given numeric identifier.
1675 """
1673 """
1676 # i18n: "rev" is a keyword
1674 # i18n: "rev" is a keyword
1677 l = getargs(x, 1, 1, _("rev requires one argument"))
1675 l = getargs(x, 1, 1, _("rev requires one argument"))
1678 try:
1676 try:
1679 # i18n: "rev" is a keyword
1677 # i18n: "rev" is a keyword
1680 l = int(getstring(l[0], _("rev requires a number")))
1678 l = int(getstring(l[0], _("rev requires a number")))
1681 except (TypeError, ValueError):
1679 except (TypeError, ValueError):
1682 # i18n: "rev" is a keyword
1680 # i18n: "rev" is a keyword
1683 raise error.ParseError(_("rev expects a number"))
1681 raise error.ParseError(_("rev expects a number"))
1684 if l not in repo.changelog and l not in (node.nullrev, node.wdirrev):
1682 if l not in repo.changelog and l not in (node.nullrev, node.wdirrev):
1685 return baseset()
1683 return baseset()
1686 return subset & baseset([l])
1684 return subset & baseset([l])
1687
1685
1688 @predicate('matching(revision [, field])', safe=True)
1686 @predicate('matching(revision [, field])', safe=True)
1689 def matching(repo, subset, x):
1687 def matching(repo, subset, x):
1690 """Changesets in which a given set of fields match the set of fields in the
1688 """Changesets in which a given set of fields match the set of fields in the
1691 selected revision or set.
1689 selected revision or set.
1692
1690
1693 To match more than one field pass the list of fields to match separated
1691 To match more than one field pass the list of fields to match separated
1694 by spaces (e.g. ``author description``).
1692 by spaces (e.g. ``author description``).
1695
1693
1696 Valid fields are most regular revision fields and some special fields.
1694 Valid fields are most regular revision fields and some special fields.
1697
1695
1698 Regular revision fields are ``description``, ``author``, ``branch``,
1696 Regular revision fields are ``description``, ``author``, ``branch``,
1699 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1697 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1700 and ``diff``.
1698 and ``diff``.
1701 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1699 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1702 contents of the revision. Two revisions matching their ``diff`` will
1700 contents of the revision. Two revisions matching their ``diff`` will
1703 also match their ``files``.
1701 also match their ``files``.
1704
1702
1705 Special fields are ``summary`` and ``metadata``:
1703 Special fields are ``summary`` and ``metadata``:
1706 ``summary`` matches the first line of the description.
1704 ``summary`` matches the first line of the description.
1707 ``metadata`` is equivalent to matching ``description user date``
1705 ``metadata`` is equivalent to matching ``description user date``
1708 (i.e. it matches the main metadata fields).
1706 (i.e. it matches the main metadata fields).
1709
1707
1710 ``metadata`` is the default field which is used when no fields are
1708 ``metadata`` is the default field which is used when no fields are
1711 specified. You can match more than one field at a time.
1709 specified. You can match more than one field at a time.
1712 """
1710 """
1713 # i18n: "matching" is a keyword
1711 # i18n: "matching" is a keyword
1714 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1712 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1715
1713
1716 revs = getset(repo, fullreposet(repo), l[0])
1714 revs = getset(repo, fullreposet(repo), l[0])
1717
1715
1718 fieldlist = ['metadata']
1716 fieldlist = ['metadata']
1719 if len(l) > 1:
1717 if len(l) > 1:
1720 fieldlist = getstring(l[1],
1718 fieldlist = getstring(l[1],
1721 # i18n: "matching" is a keyword
1719 # i18n: "matching" is a keyword
1722 _("matching requires a string "
1720 _("matching requires a string "
1723 "as its second argument")).split()
1721 "as its second argument")).split()
1724
1722
1725 # Make sure that there are no repeated fields,
1723 # Make sure that there are no repeated fields,
1726 # expand the 'special' 'metadata' field type
1724 # expand the 'special' 'metadata' field type
1727 # and check the 'files' whenever we check the 'diff'
1725 # and check the 'files' whenever we check the 'diff'
1728 fields = []
1726 fields = []
1729 for field in fieldlist:
1727 for field in fieldlist:
1730 if field == 'metadata':
1728 if field == 'metadata':
1731 fields += ['user', 'description', 'date']
1729 fields += ['user', 'description', 'date']
1732 elif field == 'diff':
1730 elif field == 'diff':
1733 # a revision matching the diff must also match the files
1731 # a revision matching the diff must also match the files
1734 # since matching the diff is very costly, make sure to
1732 # since matching the diff is very costly, make sure to
1735 # also match the files first
1733 # also match the files first
1736 fields += ['files', 'diff']
1734 fields += ['files', 'diff']
1737 else:
1735 else:
1738 if field == 'author':
1736 if field == 'author':
1739 field = 'user'
1737 field = 'user'
1740 fields.append(field)
1738 fields.append(field)
1741 fields = set(fields)
1739 fields = set(fields)
1742 if 'summary' in fields and 'description' in fields:
1740 if 'summary' in fields and 'description' in fields:
1743 # If a revision matches its description it also matches its summary
1741 # If a revision matches its description it also matches its summary
1744 fields.discard('summary')
1742 fields.discard('summary')
1745
1743
1746 # We may want to match more than one field
1744 # We may want to match more than one field
1747 # Not all fields take the same amount of time to be matched
1745 # Not all fields take the same amount of time to be matched
1748 # Sort the selected fields in order of increasing matching cost
1746 # Sort the selected fields in order of increasing matching cost
1749 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1747 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1750 'files', 'description', 'substate', 'diff']
1748 'files', 'description', 'substate', 'diff']
1751 def fieldkeyfunc(f):
1749 def fieldkeyfunc(f):
1752 try:
1750 try:
1753 return fieldorder.index(f)
1751 return fieldorder.index(f)
1754 except ValueError:
1752 except ValueError:
1755 # assume an unknown field is very costly
1753 # assume an unknown field is very costly
1756 return len(fieldorder)
1754 return len(fieldorder)
1757 fields = list(fields)
1755 fields = list(fields)
1758 fields.sort(key=fieldkeyfunc)
1756 fields.sort(key=fieldkeyfunc)
1759
1757
1760 # Each field will be matched with its own "getfield" function
1758 # Each field will be matched with its own "getfield" function
1761 # which will be added to the getfieldfuncs array of functions
1759 # which will be added to the getfieldfuncs array of functions
1762 getfieldfuncs = []
1760 getfieldfuncs = []
1763 _funcs = {
1761 _funcs = {
1764 'user': lambda r: repo[r].user(),
1762 'user': lambda r: repo[r].user(),
1765 'branch': lambda r: repo[r].branch(),
1763 'branch': lambda r: repo[r].branch(),
1766 'date': lambda r: repo[r].date(),
1764 'date': lambda r: repo[r].date(),
1767 'description': lambda r: repo[r].description(),
1765 'description': lambda r: repo[r].description(),
1768 'files': lambda r: repo[r].files(),
1766 'files': lambda r: repo[r].files(),
1769 'parents': lambda r: repo[r].parents(),
1767 'parents': lambda r: repo[r].parents(),
1770 'phase': lambda r: repo[r].phase(),
1768 'phase': lambda r: repo[r].phase(),
1771 'substate': lambda r: repo[r].substate,
1769 'substate': lambda r: repo[r].substate,
1772 'summary': lambda r: repo[r].description().splitlines()[0],
1770 'summary': lambda r: repo[r].description().splitlines()[0],
1773 'diff': lambda r: list(repo[r].diff(git=True),)
1771 'diff': lambda r: list(repo[r].diff(git=True),)
1774 }
1772 }
1775 for info in fields:
1773 for info in fields:
1776 getfield = _funcs.get(info, None)
1774 getfield = _funcs.get(info, None)
1777 if getfield is None:
1775 if getfield is None:
1778 raise error.ParseError(
1776 raise error.ParseError(
1779 # i18n: "matching" is a keyword
1777 # i18n: "matching" is a keyword
1780 _("unexpected field name passed to matching: %s") % info)
1778 _("unexpected field name passed to matching: %s") % info)
1781 getfieldfuncs.append(getfield)
1779 getfieldfuncs.append(getfield)
1782 # convert the getfield array of functions into a "getinfo" function
1780 # convert the getfield array of functions into a "getinfo" function
1783 # which returns an array of field values (or a single value if there
1781 # which returns an array of field values (or a single value if there
1784 # is only one field to match)
1782 # is only one field to match)
1785 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1783 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1786
1784
1787 def matches(x):
1785 def matches(x):
1788 for rev in revs:
1786 for rev in revs:
1789 target = getinfo(rev)
1787 target = getinfo(rev)
1790 match = True
1788 match = True
1791 for n, f in enumerate(getfieldfuncs):
1789 for n, f in enumerate(getfieldfuncs):
1792 if target[n] != f(x):
1790 if target[n] != f(x):
1793 match = False
1791 match = False
1794 if match:
1792 if match:
1795 return True
1793 return True
1796 return False
1794 return False
1797
1795
1798 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1796 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1799
1797
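An illustrative sketch (hypothetical setup) of narrowing matching() to specific fields:

from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui.load(), '.')
# With no field list, matching() compares the default 'metadata' fields
# (description, user and date); here only the author and the first line
# of the description of revision 0 are compared.
dupes = repo.revs('matching(%d, %s)', 0, 'author summary')
print(list(dupes))
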
1800 @predicate('reverse(set)', safe=True, takeorder=True, weight=0)
1798 @predicate('reverse(set)', safe=True, takeorder=True, weight=0)
1801 def reverse(repo, subset, x, order):
1799 def reverse(repo, subset, x, order):
1802 """Reverse order of set.
1800 """Reverse order of set.
1803 """
1801 """
1804 l = getset(repo, subset, x, order)
1802 l = getset(repo, subset, x, order)
1805 if order == defineorder:
1803 if order == defineorder:
1806 l.reverse()
1804 l.reverse()
1807 return l
1805 return l
1808
1806
@predicate('roots(set)', safe=True)
def roots(repo, subset, x):
    """Changesets in set with no parent changeset in set.
    """
    s = getset(repo, fullreposet(repo), x)
    parents = repo.changelog.parentrevs
    def filter(r):
        for p in parents(r):
            if 0 <= p and p in s:
                return False
        return True
    return subset & s.filter(filter, condrepr='<roots>')

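A sketch pairing roots() with the earlier heads() predicate (hypothetical setup; assumes the repository has at least ten revisions):

from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui.load(), '.')
# Within the last ten revisions, roots() are the members with no parent in
# the set and heads() the members with no child in the set.
print(list(repo.revs('roots(tip~9::tip)')))
print(list(repo.revs('heads(tip~9::tip)')))
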
1822 _sortkeyfuncs = {
1820 _sortkeyfuncs = {
1823 'rev': lambda c: c.rev(),
1821 'rev': lambda c: c.rev(),
1824 'branch': lambda c: c.branch(),
1822 'branch': lambda c: c.branch(),
1825 'desc': lambda c: c.description(),
1823 'desc': lambda c: c.description(),
1826 'user': lambda c: c.user(),
1824 'user': lambda c: c.user(),
1827 'author': lambda c: c.user(),
1825 'author': lambda c: c.user(),
1828 'date': lambda c: c.date()[0],
1826 'date': lambda c: c.date()[0],
1829 }
1827 }
1830
1828
1831 def _getsortargs(x):
1829 def _getsortargs(x):
1832 """Parse sort options into (set, [(key, reverse)], opts)"""
1830 """Parse sort options into (set, [(key, reverse)], opts)"""
1833 args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
1831 args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
1834 if 'set' not in args:
1832 if 'set' not in args:
1835 # i18n: "sort" is a keyword
1833 # i18n: "sort" is a keyword
1836 raise error.ParseError(_('sort requires one or two arguments'))
1834 raise error.ParseError(_('sort requires one or two arguments'))
1837 keys = "rev"
1835 keys = "rev"
1838 if 'keys' in args:
1836 if 'keys' in args:
1839 # i18n: "sort" is a keyword
1837 # i18n: "sort" is a keyword
1840 keys = getstring(args['keys'], _("sort spec must be a string"))
1838 keys = getstring(args['keys'], _("sort spec must be a string"))
1841
1839
1842 keyflags = []
1840 keyflags = []
1843 for k in keys.split():
1841 for k in keys.split():
1844 fk = k
1842 fk = k
1845 reverse = (k[0] == '-')
1843 reverse = (k[0] == '-')
1846 if reverse:
1844 if reverse:
1847 k = k[1:]
1845 k = k[1:]
1848 if k not in _sortkeyfuncs and k != 'topo':
1846 if k not in _sortkeyfuncs and k != 'topo':
1849 raise error.ParseError(_("unknown sort key %r") % fk)
1847 raise error.ParseError(_("unknown sort key %r") % fk)
1850 keyflags.append((k, reverse))
1848 keyflags.append((k, reverse))
1851
1849
1852 if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
1850 if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
1853 # i18n: "topo" is a keyword
1851 # i18n: "topo" is a keyword
1854 raise error.ParseError(_('topo sort order cannot be combined '
1852 raise error.ParseError(_('topo sort order cannot be combined '
1855 'with other sort keys'))
1853 'with other sort keys'))
1856
1854
1857 opts = {}
1855 opts = {}
1858 if 'topo.firstbranch' in args:
1856 if 'topo.firstbranch' in args:
1859 if any(k == 'topo' for k, reverse in keyflags):
1857 if any(k == 'topo' for k, reverse in keyflags):
1860 opts['topo.firstbranch'] = args['topo.firstbranch']
1858 opts['topo.firstbranch'] = args['topo.firstbranch']
1861 else:
1859 else:
1862 # i18n: "topo" and "topo.firstbranch" are keywords
1860 # i18n: "topo" and "topo.firstbranch" are keywords
1863 raise error.ParseError(_('topo.firstbranch can only be used '
1861 raise error.ParseError(_('topo.firstbranch can only be used '
1864 'when using the topo sort key'))
1862 'when using the topo sort key'))
1865
1863
1866 return args['set'], keyflags, opts
1864 return args['set'], keyflags, opts
1867
1865
1868 @predicate('sort(set[, [-]key... [, ...]])', safe=True, takeorder=True,
1866 @predicate('sort(set[, [-]key... [, ...]])', safe=True, takeorder=True,
1869 weight=10)
1867 weight=10)
1870 def sort(repo, subset, x, order):
1868 def sort(repo, subset, x, order):
1871 """Sort set by keys. The default sort order is ascending, specify a key
1869 """Sort set by keys. The default sort order is ascending, specify a key
1872 as ``-key`` to sort in descending order.
1870 as ``-key`` to sort in descending order.
1873
1871
1874 The keys can be:
1872 The keys can be:
1875
1873
1876 - ``rev`` for the revision number,
1874 - ``rev`` for the revision number,
1877 - ``branch`` for the branch name,
1875 - ``branch`` for the branch name,
1878 - ``desc`` for the commit message (description),
1876 - ``desc`` for the commit message (description),
1879 - ``user`` for user name (``author`` can be used as an alias),
1877 - ``user`` for user name (``author`` can be used as an alias),
1880 - ``date`` for the commit date
1878 - ``date`` for the commit date
1881 - ``topo`` for a reverse topological sort
1879 - ``topo`` for a reverse topological sort
1882
1880
1883 The ``topo`` sort order cannot be combined with other sort keys. This sort
1881 The ``topo`` sort order cannot be combined with other sort keys. This sort
1884 takes one optional argument, ``topo.firstbranch``, which takes a revset that
1882 takes one optional argument, ``topo.firstbranch``, which takes a revset that
1885 specifies what topological branches to prioritize in the sort.
1883 specifies what topological branches to prioritize in the sort.
1886
1884
1887 """
1885 """
1888 s, keyflags, opts = _getsortargs(x)
1886 s, keyflags, opts = _getsortargs(x)
1889 revs = getset(repo, subset, s, order)
1887 revs = getset(repo, subset, s, order)
1890
1888
1891 if not keyflags or order != defineorder:
1889 if not keyflags or order != defineorder:
1892 return revs
1890 return revs
1893 if len(keyflags) == 1 and keyflags[0][0] == "rev":
1891 if len(keyflags) == 1 and keyflags[0][0] == "rev":
1894 revs.sort(reverse=keyflags[0][1])
1892 revs.sort(reverse=keyflags[0][1])
1895 return revs
1893 return revs
1896 elif keyflags[0][0] == "topo":
1894 elif keyflags[0][0] == "topo":
1897 firstbranch = ()
1895 firstbranch = ()
1898 if 'topo.firstbranch' in opts:
1896 if 'topo.firstbranch' in opts:
1899 firstbranch = getset(repo, subset, opts['topo.firstbranch'])
1897 firstbranch = getset(repo, subset, opts['topo.firstbranch'])
1900 revs = baseset(dagop.toposort(revs, repo.changelog.parentrevs,
1898 revs = baseset(dagop.toposort(revs, repo.changelog.parentrevs,
1901 firstbranch),
1899 firstbranch),
1902 istopo=True)
1900 istopo=True)
1903 if keyflags[0][1]:
1901 if keyflags[0][1]:
1904 revs.reverse()
1902 revs.reverse()
1905 return revs
1903 return revs
1906
1904
1907 # sort() is guaranteed to be stable
1905 # sort() is guaranteed to be stable
1908 ctxs = [repo[r] for r in revs]
1906 ctxs = [repo[r] for r in revs]
1909 for k, reverse in reversed(keyflags):
1907 for k, reverse in reversed(keyflags):
1910 ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
1908 ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
1911 return baseset([c.rev() for c in ctxs])
1909 return baseset([c.rev() for c in ctxs])
1912
1910
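A hedged sketch of the key syntax described above, including the optional topo.firstbranch argument (the branch name is hypothetical):

from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui.load(), '.')
# Newest date first, ties broken by ascending user name.
by_date = repo.revs('sort(all(), %s)', '-date user')
# Topological order, preferring the branch containing 'default' first.
topo = repo.revs('sort(all(), topo, topo.firstbranch=%s)', 'default')
print(by_date.first())
print(topo.first())
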
1913 @predicate('subrepo([pattern])')
1911 @predicate('subrepo([pattern])')
1914 def subrepo(repo, subset, x):
1912 def subrepo(repo, subset, x):
1915 """Changesets that add, modify or remove the given subrepo. If no subrepo
1913 """Changesets that add, modify or remove the given subrepo. If no subrepo
1916 pattern is named, any subrepo changes are returned.
1914 pattern is named, any subrepo changes are returned.
1917 """
1915 """
1918 # i18n: "subrepo" is a keyword
1916 # i18n: "subrepo" is a keyword
1919 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
1917 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
1920 pat = None
1918 pat = None
1921 if len(args) != 0:
1919 if len(args) != 0:
1922 pat = getstring(args[0], _("subrepo requires a pattern"))
1920 pat = getstring(args[0], _("subrepo requires a pattern"))
1923
1921
1924 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
1922 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
1925
1923
1926 def submatches(names):
1924 def submatches(names):
1927 k, p, m = util.stringmatcher(pat)
1925 k, p, m = util.stringmatcher(pat)
1928 for name in names:
1926 for name in names:
1929 if m(name):
1927 if m(name):
1930 yield name
1928 yield name
1931
1929
1932 def matches(x):
1930 def matches(x):
1933 c = repo[x]
1931 c = repo[x]
1934 s = repo.status(c.p1().node(), c.node(), match=m)
1932 s = repo.status(c.p1().node(), c.node(), match=m)
1935
1933
1936 if pat is None:
1934 if pat is None:
1937 return s.added or s.modified or s.removed
1935 return s.added or s.modified or s.removed
1938
1936
1939 if s.added:
1937 if s.added:
1940 return any(submatches(c.substate.keys()))
1938 return any(submatches(c.substate.keys()))
1941
1939
1942 if s.modified:
1940 if s.modified:
1943 subs = set(c.p1().substate.keys())
1941 subs = set(c.p1().substate.keys())
1944 subs.update(c.substate.keys())
1942 subs.update(c.substate.keys())
1945
1943
1946 for path in submatches(subs):
1944 for path in submatches(subs):
1947 if c.p1().substate.get(path) != c.substate.get(path):
1945 if c.p1().substate.get(path) != c.substate.get(path):
1948 return True
1946 return True
1949
1947
1950 if s.removed:
1948 if s.removed:
1951 return any(submatches(c.p1().substate.keys()))
1949 return any(submatches(c.p1().substate.keys()))
1952
1950
1953 return False
1951 return False
1954
1952
1955 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
1953 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
1956
1954
1957 def _mapbynodefunc(repo, s, f):
1955 def _mapbynodefunc(repo, s, f):
1958 """(repo, smartset, [node] -> [node]) -> smartset
1956 """(repo, smartset, [node] -> [node]) -> smartset
1959
1957
1960 Helper method to map a smartset to another smartset given a function only
1958 Helper method to map a smartset to another smartset given a function only
1961 talking about nodes. Handles converting between rev numbers and nodes, and
1959 talking about nodes. Handles converting between rev numbers and nodes, and
1962 filtering.
1960 filtering.
1963 """
1961 """
1964 cl = repo.unfiltered().changelog
1962 cl = repo.unfiltered().changelog
1965 torev = cl.rev
1963 torev = cl.rev
1966 tonode = cl.node
1964 tonode = cl.node
1967 nodemap = cl.nodemap
1965 nodemap = cl.nodemap
1968 result = set(torev(n) for n in f(tonode(r) for r in s) if n in nodemap)
1966 result = set(torev(n) for n in f(tonode(r) for r in s) if n in nodemap)
1969 return smartset.baseset(result - repo.changelog.filteredrevs)
1967 return smartset.baseset(result - repo.changelog.filteredrevs)
1970
1968
@predicate('successors(set)', safe=True)
def successors(repo, subset, x):
    """All successors for set, including the given changesets themselves"""
    s = getset(repo, fullreposet(repo), x)
    f = lambda nodes: obsutil.allsuccessors(repo.obsstore, nodes)
    d = _mapbynodefunc(repo, s, f)
    return subset & d

1979 def _substringmatcher(pattern, casesensitive=True):
1977 def _substringmatcher(pattern, casesensitive=True):
1980 kind, pattern, matcher = util.stringmatcher(pattern,
1978 kind, pattern, matcher = util.stringmatcher(pattern,
1981 casesensitive=casesensitive)
1979 casesensitive=casesensitive)
1982 if kind == 'literal':
1980 if kind == 'literal':
1983 if not casesensitive:
1981 if not casesensitive:
1984 pattern = encoding.lower(pattern)
1982 pattern = encoding.lower(pattern)
1985 matcher = lambda s: pattern in encoding.lower(s)
1983 matcher = lambda s: pattern in encoding.lower(s)
1986 else:
1984 else:
1987 matcher = lambda s: pattern in s
1985 matcher = lambda s: pattern in s
1988 return kind, pattern, matcher
1986 return kind, pattern, matcher
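
# Sketch of how _substringmatcher() behaves for a literal pattern (the sample
# strings are made up):
#
#   kind, pat, m = _substringmatcher('Alice', casesensitive=False)
#   # kind == 'literal'; m('alice@example.com') is True because the matcher
#   # falls back to a case-insensitive substring test.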

@predicate('tag([name])', safe=True)
def tag(repo, subset, x):
    """The specified tag by name, or all tagged revisions if no name is given.

    Pattern matching is supported for `name`. See
    :hg:`help revisions.patterns`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if args:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = util.stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise error.RepoLookupError(_("tag '%s' does not exist")
                                            % pattern)
            s = {repo[tn].rev()}
        else:
            s = {cl.rev(n) for t, n in repo.tagslist() if matcher(t)}
    else:
        s = {cl.rev(n) for t, n in repo.tagslist() if t != 'tip'}
    return subset & s
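
# Example tag() queries; the tag names below are hypothetical:
#
#   hg log -r "tag()"               # all tagged revisions (excluding 'tip')
#   hg log -r "tag('1.0')"          # the changeset carrying tag '1.0'
#   hg log -r "tag('re:^release-')" # tags matching a regular expression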

@predicate('tagged', safe=True)
def tagged(repo, subset, x):
    return tag(repo, subset, x)

@predicate('unstable()', safe=True)
def unstable(repo, subset, x):
    msg = ("'unstable()' is deprecated, "
           "use 'orphan()'")
    repo.ui.deprecwarn(msg, '4.4')

    return orphan(repo, subset, x)

@predicate('orphan()', safe=True)
def orphan(repo, subset, x):
    """Non-obsolete changesets with obsolete ancestors. (EXPERIMENTAL)
    """
    # i18n: "orphan" is a keyword
    getargs(x, 0, 0, _("orphan takes no arguments"))
    orphan = obsmod.getrevs(repo, 'orphan')
    return subset & orphan
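
# Illustrative orphan() usage; meaningful only when obsolescence markers are
# enabled:
#
#   hg log -r "orphan()"  # changesets that need to be evolved or rebased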


@predicate('user(string)', safe=True, weight=10)
def user(repo, subset, x):
    """User name contains string. The match is case-insensitive.

    Pattern matching is supported for `string`. See
    :hg:`help revisions.patterns`.
    """
    return author(repo, subset, x)

@predicate('wdir()', safe=True, weight=0)
def wdir(repo, subset, x):
    """Working directory. (EXPERIMENTAL)"""
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    if node.wdirrev in subset or isinstance(subset, fullreposet):
        return baseset([node.wdirrev])
    return baseset()
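
# wdir() usage sketch (experimental, so behaviour can vary between versions);
# 'somefile' is a placeholder name:
#
#   hg log -r "wdir()"                # the virtual working-directory revision
#   hg annotate -r "wdir()" somefile  # annotate including uncommitted changes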

def _orderedlist(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    ls = []
    seen = set()
    for t in s.split('\0'):
        try:
            # fast path for integer revision
            r = int(t)
            if str(r) != t or r not in cl:
                raise ValueError
            revs = [r]
        except ValueError:
            revs = stringset(repo, subset, t, defineorder)

        for r in revs:
            if r in seen:
                continue
            if (r in subset
                or r == node.nullrev and isinstance(subset, fullreposet)):
                ls.append(r)
            seen.add(r)
    return baseset(ls)

# for internal use
@predicate('_list', safe=True, takeorder=True)
def _list(repo, subset, x, order):
    if order == followorder:
        # slow path to take the subset order
        return subset & _orderedlist(repo, fullreposet(repo), x)
    else:
        return _orderedlist(repo, subset, x)

def _orderedintlist(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    ls = [int(r) for r in s.split('\0')]
    s = subset
    return baseset([r for r in ls if r in s])

# for internal use
@predicate('_intlist', safe=True, takeorder=True, weight=0)
def _intlist(repo, subset, x, order):
    if order == followorder:
        # slow path to take the subset order
        return subset & _orderedintlist(repo, fullreposet(repo), x)
    else:
        return _orderedintlist(repo, subset, x)

def _orderedhexlist(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    cl = repo.changelog
    ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
    s = subset
    return baseset([r for r in ls if r in s])

# for internal use
@predicate('_hexlist', safe=True, takeorder=True)
def _hexlist(repo, subset, x, order):
    if order == followorder:
        # slow path to take the subset order
        return subset & _orderedhexlist(repo, fullreposet(repo), x)
    else:
        return _orderedhexlist(repo, subset, x)
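
# The _list/_intlist/_hexlist predicates above are not meant to be typed by
# users; they are normally generated by revsetlang.formatspec(). A rough
# sketch (the exact string form of the output is an assumption):
#
#   revsetlang.formatspec('%ld', [10, 12])  # roughly "_intlist('10\x0012')"
#   revsetlang.formatspec('%ln', nodes)     # "_hexlist(...)" over hex node ids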

methods = {
    "range": rangeset,
    "rangeall": rangeall,
    "rangepre": rangepre,
    "rangepost": rangepost,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "andsmally": andsmallyset,
    "or": orset,
    "not": notset,
    "difference": differenceset,
    "relation": relationset,
    "relsubscript": relsubscriptset,
    "subscript": subscriptset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": parentpost,
}
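
# The methods table maps each parsed-tree node type ('and', 'or', 'func',
# 'range', ...) to the function that evaluates it; getset() dispatches on the
# first element of the tree tuple. For example, "tag() and not merge()" parses
# to a tree whose top node is 'and' (rewritten to 'difference' by the
# optimizer), so evaluation enters andset() or differenceset() and recurses.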

def posttreebuilthook(tree, repo):
    # hook for extensions to execute code on the optimized tree
    pass

def match(ui, spec, repo=None):
    """Create a matcher for a single revision spec"""
    return matchany(ui, [spec], repo=repo)

def matchany(ui, specs, repo=None, localalias=None):
    """Create a matcher that will include any revisions matching one of the
    given specs

    If localalias is not None, it is a dict {name: definitionstring}. It takes
    precedence over the [revsetalias] config section.
    """
    if not specs:
        def mfunc(repo, subset=None):
            return baseset()
        return mfunc
    if not all(specs):
        raise error.ParseError(_("empty query"))
    lookup = None
    if repo:
        lookup = repo.__contains__
    if len(specs) == 1:
        tree = revsetlang.parse(specs[0], lookup)
    else:
        tree = ('or',
                ('list',) + tuple(revsetlang.parse(s, lookup) for s in specs))

    aliases = []
    warn = None
    if ui:
        aliases.extend(ui.configitems('revsetalias'))
        warn = ui.warn
    if localalias:
        aliases.extend(localalias.items())
    if aliases:
        tree = revsetlang.expandaliases(tree, aliases, warn=warn)
    tree = revsetlang.foldconcat(tree)
    tree = revsetlang.analyze(tree)
    tree = revsetlang.optimize(tree)
    posttreebuilthook(tree, repo)
    return makematcher(tree)
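
# A minimal sketch of programmatic use from an extension (ui/repo are assumed
# to come from the calling context; the revset strings are examples):
#
#   m = matchany(ui, ['draft()', 'tag()'], repo=repo)
#   revs = m(repo)                              # smartset of matching revisions
#   revs = m(repo, subset=baseset([1, 2, 3]))   # restrict to an explicit subset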

def makematcher(tree):
    """Create a matcher from an evaluatable tree"""
    def mfunc(repo, subset=None, order=None):
        if order is None:
            if subset is None:
                order = defineorder # 'x'
            else:
                order = followorder # 'subset & x'
        if subset is None:
            subset = fullreposet(repo)
        return getset(repo, subset, tree, order)
    return mfunc

def loadpredicate(ui, extname, registrarobj):
    """Load revset predicates from the specified registrarobj
    """
    for name, func in registrarobj._table.iteritems():
        symbols[name] = func
        if func._safe:
            safesymbols.add(name)

# load built-in predicates explicitly to set up safesymbols
loadpredicate(None, None, predicate)
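
# Extensions typically add predicates through registrar.revsetpredicate(), and
# loadpredicate() is called for them when the extension is loaded. A hedged
# sketch of the extension side ('mypredicate' is a placeholder name):
#
#   revsetpredicate = registrar.revsetpredicate()
#
#   @revsetpredicate('mypredicate(arg)')
#   def mypredicate(repo, subset, x):
#       ...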

# tell hggettext to extract docstrings from these functions:
i18nfunctions = symbols.values()