revset: add 'takeorder' attribute to mark functions that need ordering flag...

Author: Yuya Nishihara
Changeset: r29933:91a95ad9 (branch: default)
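The two hunks below touch registrar.py and revset.py: the revset predicate registrar grows an optional 'takeorder' flag (stored on the registered function as '_takeorder'), and revset.func() passes the current ordering policy as an extra trailing argument to any predicate registered with that flag. A minimal sketch of how an extension could opt in once this change is applied; the predicate name ('orderaware') and its body are made up for illustration::

    from mercurial import registrar, revset

    revsetpredicate = registrar.revsetpredicate()

    @revsetpredicate('orderaware(set)', takeorder=True)
    def orderaware(repo, subset, x, order):
        '''Hypothetical predicate that wants to see the ordering policy.'''
        s = revset.getset(repo, revset.fullreposet(repo), x)
        # 'order' is forwarded verbatim by revset.func(); a predicate can use
        # it to decide whether it must preserve the caller's ordering itself.
        return subset & s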
mercurial/registrar.py
@@ -1,244 +1,248 @@
1 # registrar.py - utilities to register function for specific purpose
1 # registrar.py - utilities to register function for specific purpose
2 #
2 #
3 # Copyright FUJIWARA Katsunori <foozy@lares.dti.ne.jp> and others
3 # Copyright FUJIWARA Katsunori <foozy@lares.dti.ne.jp> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from . import (
10 from . import (
11 util,
11 util,
12 )
12 )
13
13
14 class _funcregistrarbase(object):
14 class _funcregistrarbase(object):
15 """Base of decorator to register a fuction for specific purpose
15 """Base of decorator to register a fuction for specific purpose
16
16
17 This decorator stores decorated functions into its own dict '_table'.
17 This decorator stores decorated functions into its own dict '_table'.
18
18
19 The simplest derived class can be defined by overriding '_docformat',
19 The simplest derived class can be defined by overriding '_docformat',
20 for example::
20 for example::
21
21
22 class keyword(_funcregistrarbase):
22 class keyword(_funcregistrarbase):
23 _docformat = ":%s: %s"
23 _docformat = ":%s: %s"
24
24
25 This should be used as below:
25 This should be used as below:
26
26
27 keyword = registrar.keyword()
27 keyword = registrar.keyword()
28
28
29 @keyword('bar')
29 @keyword('bar')
30 def barfunc(*args, **kwargs):
30 def barfunc(*args, **kwargs):
31 '''Explanation of bar keyword ....
31 '''Explanation of bar keyword ....
32 '''
32 '''
33 pass
33 pass
34
34
35 In this case:
35 In this case:
36
36
37 - 'barfunc' is stored as 'bar' in '_table' of an instance 'keyword' above
37 - 'barfunc' is stored as 'bar' in '_table' of an instance 'keyword' above
38 - 'barfunc.__doc__' becomes ":bar: Explanation of bar keyword"
38 - 'barfunc.__doc__' becomes ":bar: Explanation of bar keyword"
39 """
39 """
40 def __init__(self, table=None):
40 def __init__(self, table=None):
41 if table is None:
41 if table is None:
42 self._table = {}
42 self._table = {}
43 else:
43 else:
44 self._table = table
44 self._table = table
45
45
46 def __call__(self, decl, *args, **kwargs):
46 def __call__(self, decl, *args, **kwargs):
47 return lambda func: self._doregister(func, decl, *args, **kwargs)
47 return lambda func: self._doregister(func, decl, *args, **kwargs)
48
48
49 def _doregister(self, func, decl, *args, **kwargs):
49 def _doregister(self, func, decl, *args, **kwargs):
50 name = self._getname(decl)
50 name = self._getname(decl)
51
51
52 if func.__doc__ and not util.safehasattr(func, '_origdoc'):
52 if func.__doc__ and not util.safehasattr(func, '_origdoc'):
53 doc = func.__doc__.strip()
53 doc = func.__doc__.strip()
54 func._origdoc = doc
54 func._origdoc = doc
55 func.__doc__ = self._formatdoc(decl, doc)
55 func.__doc__ = self._formatdoc(decl, doc)
56
56
57 self._table[name] = func
57 self._table[name] = func
58 self._extrasetup(name, func, *args, **kwargs)
58 self._extrasetup(name, func, *args, **kwargs)
59
59
60 return func
60 return func
61
61
62 def _parsefuncdecl(self, decl):
62 def _parsefuncdecl(self, decl):
63 """Parse function declaration and return the name of function in it
63 """Parse function declaration and return the name of function in it
64 """
64 """
65 i = decl.find('(')
65 i = decl.find('(')
66 if i >= 0:
66 if i >= 0:
67 return decl[:i]
67 return decl[:i]
68 else:
68 else:
69 return decl
69 return decl
70
70
71 def _getname(self, decl):
71 def _getname(self, decl):
72 """Return the name of the registered function from decl
72 """Return the name of the registered function from decl
73
73
74 Derived class should override this, if it allows more
74 Derived class should override this, if it allows more
75 descriptive 'decl' string than just a name.
75 descriptive 'decl' string than just a name.
76 """
76 """
77 return decl
77 return decl
78
78
79 _docformat = None
79 _docformat = None
80
80
81 def _formatdoc(self, decl, doc):
81 def _formatdoc(self, decl, doc):
82 """Return formatted document of the registered function for help
82 """Return formatted document of the registered function for help
83
83
84 'doc' is '__doc__.strip()' of the registered function.
84 'doc' is '__doc__.strip()' of the registered function.
85 """
85 """
86 return self._docformat % (decl, doc)
86 return self._docformat % (decl, doc)
87
87
88 def _extrasetup(self, name, func):
88 def _extrasetup(self, name, func):
89 """Execute exra setup for registered function, if needed
89 """Execute exra setup for registered function, if needed
90 """
90 """
91 pass
91 pass
92
92
93 class revsetpredicate(_funcregistrarbase):
93 class revsetpredicate(_funcregistrarbase):
94 """Decorator to register revset predicate
94 """Decorator to register revset predicate
95
95
96 Usage::
96 Usage::
97
97
98 revsetpredicate = registrar.revsetpredicate()
98 revsetpredicate = registrar.revsetpredicate()
99
99
100 @revsetpredicate('mypredicate(arg1, arg2[, arg3])')
100 @revsetpredicate('mypredicate(arg1, arg2[, arg3])')
101 def mypredicatefunc(repo, subset, x):
101 def mypredicatefunc(repo, subset, x):
102 '''Explanation of this revset predicate ....
102 '''Explanation of this revset predicate ....
103 '''
103 '''
104 pass
104 pass
105
105
106 The first string argument is used also in online help.
106 The first string argument is used also in online help.
107
107
108 Optional argument 'safe' indicates whether a predicate is safe for
108 Optional argument 'safe' indicates whether a predicate is safe for
109 DoS attack (False by default).
109 DoS attack (False by default).
110
110
111 Optional argument 'takeorder' indicates whether a predicate function
112 takes ordering policy as the last argument.
113
111 'revsetpredicate' instance in example above can be used to
114 'revsetpredicate' instance in example above can be used to
112 decorate multiple functions.
115 decorate multiple functions.
113
116
114 Decorated functions are registered automatically at loading
117 Decorated functions are registered automatically at loading
115 extension, if an instance named as 'revsetpredicate' is used for
118 extension, if an instance named as 'revsetpredicate' is used for
116 decorating in extension.
119 decorating in extension.
117
120
118 Otherwise, explicit 'revset.loadpredicate()' is needed.
121 Otherwise, explicit 'revset.loadpredicate()' is needed.
119 """
122 """
120 _getname = _funcregistrarbase._parsefuncdecl
123 _getname = _funcregistrarbase._parsefuncdecl
121 _docformat = "``%s``\n %s"
124 _docformat = "``%s``\n %s"
122
125
123 def _extrasetup(self, name, func, safe=False):
126 def _extrasetup(self, name, func, safe=False, takeorder=False):
124 func._safe = safe
127 func._safe = safe
128 func._takeorder = takeorder
125
129
126 class filesetpredicate(_funcregistrarbase):
130 class filesetpredicate(_funcregistrarbase):
127 """Decorator to register fileset predicate
131 """Decorator to register fileset predicate
128
132
129 Usage::
133 Usage::
130
134
131 filesetpredicate = registrar.filesetpredicate()
135 filesetpredicate = registrar.filesetpredicate()
132
136
133 @filesetpredicate('mypredicate()')
137 @filesetpredicate('mypredicate()')
134 def mypredicatefunc(mctx, x):
138 def mypredicatefunc(mctx, x):
135 '''Explanation of this fileset predicate ....
139 '''Explanation of this fileset predicate ....
136 '''
140 '''
137 pass
141 pass
138
142
139 The first string argument is used also in online help.
143 The first string argument is used also in online help.
140
144
141 Optional argument 'callstatus' indicates whether a predicate
145 Optional argument 'callstatus' indicates whether a predicate
142 implies 'matchctx.status()' at runtime or not (False, by
146 implies 'matchctx.status()' at runtime or not (False, by
143 default).
147 default).
144
148
145 Optional argument 'callexisting' indicates whether a predicate
149 Optional argument 'callexisting' indicates whether a predicate
146 implies 'matchctx.existing()' at runtime or not (False, by
150 implies 'matchctx.existing()' at runtime or not (False, by
147 default).
151 default).
148
152
149 'filesetpredicate' instance in example above can be used to
153 'filesetpredicate' instance in example above can be used to
150 decorate multiple functions.
154 decorate multiple functions.
151
155
152 Decorated functions are registered automatically at loading
156 Decorated functions are registered automatically at loading
153 extension, if an instance named as 'filesetpredicate' is used for
157 extension, if an instance named as 'filesetpredicate' is used for
154 decorating in extension.
158 decorating in extension.
155
159
156 Otherwise, explicit 'fileset.loadpredicate()' is needed.
160 Otherwise, explicit 'fileset.loadpredicate()' is needed.
157 """
161 """
158 _getname = _funcregistrarbase._parsefuncdecl
162 _getname = _funcregistrarbase._parsefuncdecl
159 _docformat = "``%s``\n %s"
163 _docformat = "``%s``\n %s"
160
164
161 def _extrasetup(self, name, func, callstatus=False, callexisting=False):
165 def _extrasetup(self, name, func, callstatus=False, callexisting=False):
162 func._callstatus = callstatus
166 func._callstatus = callstatus
163 func._callexisting = callexisting
167 func._callexisting = callexisting
164
168
165 class _templateregistrarbase(_funcregistrarbase):
169 class _templateregistrarbase(_funcregistrarbase):
166 """Base of decorator to register functions as template specific one
170 """Base of decorator to register functions as template specific one
167 """
171 """
168 _docformat = ":%s: %s"
172 _docformat = ":%s: %s"
169
173
170 class templatekeyword(_templateregistrarbase):
174 class templatekeyword(_templateregistrarbase):
171 """Decorator to register template keyword
175 """Decorator to register template keyword
172
176
173 Usage::
177 Usage::
174
178
175 templatekeyword = registrar.templatekeyword()
179 templatekeyword = registrar.templatekeyword()
176
180
177 @templatekeyword('mykeyword')
181 @templatekeyword('mykeyword')
178 def mykeywordfunc(repo, ctx, templ, cache, revcache, **args):
182 def mykeywordfunc(repo, ctx, templ, cache, revcache, **args):
179 '''Explanation of this template keyword ....
183 '''Explanation of this template keyword ....
180 '''
184 '''
181 pass
185 pass
182
186
183 The first string argument is used also in online help.
187 The first string argument is used also in online help.
184
188
185 'templatekeyword' instance in example above can be used to
189 'templatekeyword' instance in example above can be used to
186 decorate multiple functions.
190 decorate multiple functions.
187
191
188 Decorated functions are registered automatically at loading
192 Decorated functions are registered automatically at loading
189 extension, if an instance named as 'templatekeyword' is used for
193 extension, if an instance named as 'templatekeyword' is used for
190 decorating in extension.
194 decorating in extension.
191
195
192 Otherwise, explicit 'templatekw.loadkeyword()' is needed.
196 Otherwise, explicit 'templatekw.loadkeyword()' is needed.
193 """
197 """
194
198
195 class templatefilter(_templateregistrarbase):
199 class templatefilter(_templateregistrarbase):
196 """Decorator to register template filer
200 """Decorator to register template filer
197
201
198 Usage::
202 Usage::
199
203
200 templatefilter = registrar.templatefilter()
204 templatefilter = registrar.templatefilter()
201
205
202 @templatefilter('myfilter')
206 @templatefilter('myfilter')
203 def myfilterfunc(text):
207 def myfilterfunc(text):
204 '''Explanation of this template filter ....
208 '''Explanation of this template filter ....
205 '''
209 '''
206 pass
210 pass
207
211
208 The first string argument is used also in online help.
212 The first string argument is used also in online help.
209
213
210 'templatefilter' instance in example above can be used to
214 'templatefilter' instance in example above can be used to
211 decorate multiple functions.
215 decorate multiple functions.
212
216
213 Decorated functions are registered automatically at loading
217 Decorated functions are registered automatically at loading
214 extension, if an instance named as 'templatefilter' is used for
218 extension, if an instance named as 'templatefilter' is used for
215 decorating in extension.
219 decorating in extension.
216
220
217 Otherwise, explicit 'templatefilters.loadfilter()' is needed.
221 Otherwise, explicit 'templatefilters.loadfilter()' is needed.
218 """
222 """
219
223
220 class templatefunc(_templateregistrarbase):
224 class templatefunc(_templateregistrarbase):
221 """Decorator to register template function
225 """Decorator to register template function
222
226
223 Usage::
227 Usage::
224
228
225 templatefunc = registrar.templatefunc()
229 templatefunc = registrar.templatefunc()
226
230
227 @templatefunc('myfunc(arg1, arg2[, arg3])')
231 @templatefunc('myfunc(arg1, arg2[, arg3])')
228 def myfuncfunc(context, mapping, args):
232 def myfuncfunc(context, mapping, args):
229 '''Explanation of this template function ....
233 '''Explanation of this template function ....
230 '''
234 '''
231 pass
235 pass
232
236
233 The first string argument is used also in online help.
237 The first string argument is used also in online help.
234
238
235 'templatefunc' instance in example above can be used to
239 'templatefunc' instance in example above can be used to
236 decorate multiple functions.
240 decorate multiple functions.
237
241
238 Decorated functions are registered automatically at loading
242 Decorated functions are registered automatically at loading
239 extension, if an instance named as 'templatefunc' is used for
243 extension, if an instance named as 'templatefunc' is used for
240 decorating in extension.
244 decorating in extension.
241
245
242 Otherwise, explicit 'templater.loadfunction()' is needed.
246 Otherwise, explicit 'templater.loadfunction()' is needed.
243 """
247 """
244 _getname = _funcregistrarbase._parsefuncdecl
248 _getname = _funcregistrarbase._parsefuncdecl
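For orientation before the revset.py hunk: the base class above is a small decorator factory. Calling the instance returns a closure that registers the decorated function into '_table' under the name parsed from 'decl', rewrites its '__doc__' through '_docformat', and finally calls '_extrasetup', which is where subclasses attach extra attributes such as '_safe' and, with this change, '_takeorder'. A self-contained sketch of that flow, using only the code in the hunk above (the 'keyword' subclass mirrors the example in the class docstring)::

    from mercurial import registrar

    class keyword(registrar._funcregistrarbase):
        _docformat = ":%s: %s"

    keyword = keyword()

    @keyword('bar')
    def barfunc(*args, **kwargs):
        '''Explanation of bar keyword ....'''

    # the decorator stored the function under its declared name and
    # reformatted its docstring for the help output
    assert keyword._table['bar'] is barfunc
    assert barfunc.__doc__ == ":bar: Explanation of bar keyword ...."
    assert barfunc._origdoc == "Explanation of bar keyword ...."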
mercurial/revset.py
@@ -1,3795 +1,3798 @@
1 # revset.py - revision set queries for mercurial
1 # revset.py - revision set queries for mercurial
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import heapq
10 import heapq
11 import re
11 import re
12
12
13 from .i18n import _
13 from .i18n import _
14 from . import (
14 from . import (
15 destutil,
15 destutil,
16 encoding,
16 encoding,
17 error,
17 error,
18 hbisect,
18 hbisect,
19 match as matchmod,
19 match as matchmod,
20 node,
20 node,
21 obsolete as obsmod,
21 obsolete as obsmod,
22 parser,
22 parser,
23 pathutil,
23 pathutil,
24 phases,
24 phases,
25 registrar,
25 registrar,
26 repoview,
26 repoview,
27 util,
27 util,
28 )
28 )
29
29
30 def _revancestors(repo, revs, followfirst):
30 def _revancestors(repo, revs, followfirst):
31 """Like revlog.ancestors(), but supports followfirst."""
31 """Like revlog.ancestors(), but supports followfirst."""
32 if followfirst:
32 if followfirst:
33 cut = 1
33 cut = 1
34 else:
34 else:
35 cut = None
35 cut = None
36 cl = repo.changelog
36 cl = repo.changelog
37
37
38 def iterate():
38 def iterate():
39 revs.sort(reverse=True)
39 revs.sort(reverse=True)
40 irevs = iter(revs)
40 irevs = iter(revs)
41 h = []
41 h = []
42
42
43 inputrev = next(irevs, None)
43 inputrev = next(irevs, None)
44 if inputrev is not None:
44 if inputrev is not None:
45 heapq.heappush(h, -inputrev)
45 heapq.heappush(h, -inputrev)
46
46
47 seen = set()
47 seen = set()
48 while h:
48 while h:
49 current = -heapq.heappop(h)
49 current = -heapq.heappop(h)
50 if current == inputrev:
50 if current == inputrev:
51 inputrev = next(irevs, None)
51 inputrev = next(irevs, None)
52 if inputrev is not None:
52 if inputrev is not None:
53 heapq.heappush(h, -inputrev)
53 heapq.heappush(h, -inputrev)
54 if current not in seen:
54 if current not in seen:
55 seen.add(current)
55 seen.add(current)
56 yield current
56 yield current
57 for parent in cl.parentrevs(current)[:cut]:
57 for parent in cl.parentrevs(current)[:cut]:
58 if parent != node.nullrev:
58 if parent != node.nullrev:
59 heapq.heappush(h, -parent)
59 heapq.heappush(h, -parent)
60
60
61 return generatorset(iterate(), iterasc=False)
61 return generatorset(iterate(), iterasc=False)
62
62
63 def _revdescendants(repo, revs, followfirst):
63 def _revdescendants(repo, revs, followfirst):
64 """Like revlog.descendants() but supports followfirst."""
64 """Like revlog.descendants() but supports followfirst."""
65 if followfirst:
65 if followfirst:
66 cut = 1
66 cut = 1
67 else:
67 else:
68 cut = None
68 cut = None
69
69
70 def iterate():
70 def iterate():
71 cl = repo.changelog
71 cl = repo.changelog
72 # XXX this should be 'parentset.min()' assuming 'parentset' is a
72 # XXX this should be 'parentset.min()' assuming 'parentset' is a
73 # smartset (and if it is not, it should.)
73 # smartset (and if it is not, it should.)
74 first = min(revs)
74 first = min(revs)
75 nullrev = node.nullrev
75 nullrev = node.nullrev
76 if first == nullrev:
76 if first == nullrev:
77 # Are there nodes with a null first parent and a non-null
77 # Are there nodes with a null first parent and a non-null
78 # second one? Maybe. Do we care? Probably not.
78 # second one? Maybe. Do we care? Probably not.
79 for i in cl:
79 for i in cl:
80 yield i
80 yield i
81 else:
81 else:
82 seen = set(revs)
82 seen = set(revs)
83 for i in cl.revs(first + 1):
83 for i in cl.revs(first + 1):
84 for x in cl.parentrevs(i)[:cut]:
84 for x in cl.parentrevs(i)[:cut]:
85 if x != nullrev and x in seen:
85 if x != nullrev and x in seen:
86 seen.add(i)
86 seen.add(i)
87 yield i
87 yield i
88 break
88 break
89
89
90 return generatorset(iterate(), iterasc=True)
90 return generatorset(iterate(), iterasc=True)
91
91
92 def _reachablerootspure(repo, minroot, roots, heads, includepath):
92 def _reachablerootspure(repo, minroot, roots, heads, includepath):
93 """return (heads(::<roots> and ::<heads>))
93 """return (heads(::<roots> and ::<heads>))
94
94
95 If includepath is True, return (<roots>::<heads>)."""
95 If includepath is True, return (<roots>::<heads>)."""
96 if not roots:
96 if not roots:
97 return []
97 return []
98 parentrevs = repo.changelog.parentrevs
98 parentrevs = repo.changelog.parentrevs
99 roots = set(roots)
99 roots = set(roots)
100 visit = list(heads)
100 visit = list(heads)
101 reachable = set()
101 reachable = set()
102 seen = {}
102 seen = {}
103 # prefetch all the things! (because python is slow)
103 # prefetch all the things! (because python is slow)
104 reached = reachable.add
104 reached = reachable.add
105 dovisit = visit.append
105 dovisit = visit.append
106 nextvisit = visit.pop
106 nextvisit = visit.pop
107 # open-code the post-order traversal due to the tiny size of
107 # open-code the post-order traversal due to the tiny size of
108 # sys.getrecursionlimit()
108 # sys.getrecursionlimit()
109 while visit:
109 while visit:
110 rev = nextvisit()
110 rev = nextvisit()
111 if rev in roots:
111 if rev in roots:
112 reached(rev)
112 reached(rev)
113 if not includepath:
113 if not includepath:
114 continue
114 continue
115 parents = parentrevs(rev)
115 parents = parentrevs(rev)
116 seen[rev] = parents
116 seen[rev] = parents
117 for parent in parents:
117 for parent in parents:
118 if parent >= minroot and parent not in seen:
118 if parent >= minroot and parent not in seen:
119 dovisit(parent)
119 dovisit(parent)
120 if not reachable:
120 if not reachable:
121 return baseset()
121 return baseset()
122 if not includepath:
122 if not includepath:
123 return reachable
123 return reachable
124 for rev in sorted(seen):
124 for rev in sorted(seen):
125 for parent in seen[rev]:
125 for parent in seen[rev]:
126 if parent in reachable:
126 if parent in reachable:
127 reached(rev)
127 reached(rev)
128 return reachable
128 return reachable
129
129
130 def reachableroots(repo, roots, heads, includepath=False):
130 def reachableroots(repo, roots, heads, includepath=False):
131 """return (heads(::<roots> and ::<heads>))
131 """return (heads(::<roots> and ::<heads>))
132
132
133 If includepath is True, return (<roots>::<heads>)."""
133 If includepath is True, return (<roots>::<heads>)."""
134 if not roots:
134 if not roots:
135 return baseset()
135 return baseset()
136 minroot = roots.min()
136 minroot = roots.min()
137 roots = list(roots)
137 roots = list(roots)
138 heads = list(heads)
138 heads = list(heads)
139 try:
139 try:
140 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
140 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
141 except AttributeError:
141 except AttributeError:
142 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
142 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
143 revs = baseset(revs)
143 revs = baseset(revs)
144 revs.sort()
144 revs.sort()
145 return revs
145 return revs
146
146
147 elements = {
147 elements = {
148 # token-type: binding-strength, primary, prefix, infix, suffix
148 # token-type: binding-strength, primary, prefix, infix, suffix
149 "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
149 "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
150 "##": (20, None, None, ("_concat", 20), None),
150 "##": (20, None, None, ("_concat", 20), None),
151 "~": (18, None, None, ("ancestor", 18), None),
151 "~": (18, None, None, ("ancestor", 18), None),
152 "^": (18, None, None, ("parent", 18), "parentpost"),
152 "^": (18, None, None, ("parent", 18), "parentpost"),
153 "-": (5, None, ("negate", 19), ("minus", 5), None),
153 "-": (5, None, ("negate", 19), ("minus", 5), None),
154 "::": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
154 "::": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
155 "..": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
155 "..": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
156 ":": (15, "rangeall", ("rangepre", 15), ("range", 15), "rangepost"),
156 ":": (15, "rangeall", ("rangepre", 15), ("range", 15), "rangepost"),
157 "not": (10, None, ("not", 10), None, None),
157 "not": (10, None, ("not", 10), None, None),
158 "!": (10, None, ("not", 10), None, None),
158 "!": (10, None, ("not", 10), None, None),
159 "and": (5, None, None, ("and", 5), None),
159 "and": (5, None, None, ("and", 5), None),
160 "&": (5, None, None, ("and", 5), None),
160 "&": (5, None, None, ("and", 5), None),
161 "%": (5, None, None, ("only", 5), "onlypost"),
161 "%": (5, None, None, ("only", 5), "onlypost"),
162 "or": (4, None, None, ("or", 4), None),
162 "or": (4, None, None, ("or", 4), None),
163 "|": (4, None, None, ("or", 4), None),
163 "|": (4, None, None, ("or", 4), None),
164 "+": (4, None, None, ("or", 4), None),
164 "+": (4, None, None, ("or", 4), None),
165 "=": (3, None, None, ("keyvalue", 3), None),
165 "=": (3, None, None, ("keyvalue", 3), None),
166 ",": (2, None, None, ("list", 2), None),
166 ",": (2, None, None, ("list", 2), None),
167 ")": (0, None, None, None, None),
167 ")": (0, None, None, None, None),
168 "symbol": (0, "symbol", None, None, None),
168 "symbol": (0, "symbol", None, None, None),
169 "string": (0, "string", None, None, None),
169 "string": (0, "string", None, None, None),
170 "end": (0, None, None, None, None),
170 "end": (0, None, None, None, None),
171 }
171 }
172
172
173 keywords = set(['and', 'or', 'not'])
173 keywords = set(['and', 'or', 'not'])
174
174
175 # default set of valid characters for the initial letter of symbols
175 # default set of valid characters for the initial letter of symbols
176 _syminitletters = set(c for c in [chr(i) for i in xrange(256)]
176 _syminitletters = set(c for c in [chr(i) for i in xrange(256)]
177 if c.isalnum() or c in '._@' or ord(c) > 127)
177 if c.isalnum() or c in '._@' or ord(c) > 127)
178
178
179 # default set of valid characters for non-initial letters of symbols
179 # default set of valid characters for non-initial letters of symbols
180 _symletters = set(c for c in [chr(i) for i in xrange(256)]
180 _symletters = set(c for c in [chr(i) for i in xrange(256)]
181 if c.isalnum() or c in '-._/@' or ord(c) > 127)
181 if c.isalnum() or c in '-._/@' or ord(c) > 127)
182
182
183 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
183 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
184 '''
184 '''
185 Parse a revset statement into a stream of tokens
185 Parse a revset statement into a stream of tokens
186
186
187 ``syminitletters`` is the set of valid characters for the initial
187 ``syminitletters`` is the set of valid characters for the initial
188 letter of symbols.
188 letter of symbols.
189
189
190 By default, character ``c`` is recognized as valid for initial
190 By default, character ``c`` is recognized as valid for initial
191 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
191 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
192
192
193 ``symletters`` is the set of valid characters for non-initial
193 ``symletters`` is the set of valid characters for non-initial
194 letters of symbols.
194 letters of symbols.
195
195
196 By default, character ``c`` is recognized as valid for non-initial
196 By default, character ``c`` is recognized as valid for non-initial
197 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
197 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
198
198
199 Check that @ is a valid unquoted token character (issue3686):
199 Check that @ is a valid unquoted token character (issue3686):
200 >>> list(tokenize("@::"))
200 >>> list(tokenize("@::"))
201 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
201 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
202
202
203 '''
203 '''
204 if syminitletters is None:
204 if syminitletters is None:
205 syminitletters = _syminitletters
205 syminitletters = _syminitletters
206 if symletters is None:
206 if symletters is None:
207 symletters = _symletters
207 symletters = _symletters
208
208
209 if program and lookup:
209 if program and lookup:
210 # attempt to parse old-style ranges first to deal with
210 # attempt to parse old-style ranges first to deal with
211 # things like old-tag which contain query metacharacters
211 # things like old-tag which contain query metacharacters
212 parts = program.split(':', 1)
212 parts = program.split(':', 1)
213 if all(lookup(sym) for sym in parts if sym):
213 if all(lookup(sym) for sym in parts if sym):
214 if parts[0]:
214 if parts[0]:
215 yield ('symbol', parts[0], 0)
215 yield ('symbol', parts[0], 0)
216 if len(parts) > 1:
216 if len(parts) > 1:
217 s = len(parts[0])
217 s = len(parts[0])
218 yield (':', None, s)
218 yield (':', None, s)
219 if parts[1]:
219 if parts[1]:
220 yield ('symbol', parts[1], s + 1)
220 yield ('symbol', parts[1], s + 1)
221 yield ('end', None, len(program))
221 yield ('end', None, len(program))
222 return
222 return
223
223
224 pos, l = 0, len(program)
224 pos, l = 0, len(program)
225 while pos < l:
225 while pos < l:
226 c = program[pos]
226 c = program[pos]
227 if c.isspace(): # skip inter-token whitespace
227 if c.isspace(): # skip inter-token whitespace
228 pass
228 pass
229 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
229 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
230 yield ('::', None, pos)
230 yield ('::', None, pos)
231 pos += 1 # skip ahead
231 pos += 1 # skip ahead
232 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
232 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
233 yield ('..', None, pos)
233 yield ('..', None, pos)
234 pos += 1 # skip ahead
234 pos += 1 # skip ahead
235 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
235 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
236 yield ('##', None, pos)
236 yield ('##', None, pos)
237 pos += 1 # skip ahead
237 pos += 1 # skip ahead
238 elif c in "():=,-|&+!~^%": # handle simple operators
238 elif c in "():=,-|&+!~^%": # handle simple operators
239 yield (c, None, pos)
239 yield (c, None, pos)
240 elif (c in '"\'' or c == 'r' and
240 elif (c in '"\'' or c == 'r' and
241 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
241 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
242 if c == 'r':
242 if c == 'r':
243 pos += 1
243 pos += 1
244 c = program[pos]
244 c = program[pos]
245 decode = lambda x: x
245 decode = lambda x: x
246 else:
246 else:
247 decode = parser.unescapestr
247 decode = parser.unescapestr
248 pos += 1
248 pos += 1
249 s = pos
249 s = pos
250 while pos < l: # find closing quote
250 while pos < l: # find closing quote
251 d = program[pos]
251 d = program[pos]
252 if d == '\\': # skip over escaped characters
252 if d == '\\': # skip over escaped characters
253 pos += 2
253 pos += 2
254 continue
254 continue
255 if d == c:
255 if d == c:
256 yield ('string', decode(program[s:pos]), s)
256 yield ('string', decode(program[s:pos]), s)
257 break
257 break
258 pos += 1
258 pos += 1
259 else:
259 else:
260 raise error.ParseError(_("unterminated string"), s)
260 raise error.ParseError(_("unterminated string"), s)
261 # gather up a symbol/keyword
261 # gather up a symbol/keyword
262 elif c in syminitletters:
262 elif c in syminitletters:
263 s = pos
263 s = pos
264 pos += 1
264 pos += 1
265 while pos < l: # find end of symbol
265 while pos < l: # find end of symbol
266 d = program[pos]
266 d = program[pos]
267 if d not in symletters:
267 if d not in symletters:
268 break
268 break
269 if d == '.' and program[pos - 1] == '.': # special case for ..
269 if d == '.' and program[pos - 1] == '.': # special case for ..
270 pos -= 1
270 pos -= 1
271 break
271 break
272 pos += 1
272 pos += 1
273 sym = program[s:pos]
273 sym = program[s:pos]
274 if sym in keywords: # operator keywords
274 if sym in keywords: # operator keywords
275 yield (sym, None, s)
275 yield (sym, None, s)
276 elif '-' in sym:
276 elif '-' in sym:
277 # some jerk gave us foo-bar-baz, try to check if it's a symbol
277 # some jerk gave us foo-bar-baz, try to check if it's a symbol
278 if lookup and lookup(sym):
278 if lookup and lookup(sym):
279 # looks like a real symbol
279 # looks like a real symbol
280 yield ('symbol', sym, s)
280 yield ('symbol', sym, s)
281 else:
281 else:
282 # looks like an expression
282 # looks like an expression
283 parts = sym.split('-')
283 parts = sym.split('-')
284 for p in parts[:-1]:
284 for p in parts[:-1]:
285 if p: # possible consecutive -
285 if p: # possible consecutive -
286 yield ('symbol', p, s)
286 yield ('symbol', p, s)
287 s += len(p)
287 s += len(p)
288 yield ('-', None, pos)
288 yield ('-', None, pos)
289 s += 1
289 s += 1
290 if parts[-1]: # possible trailing -
290 if parts[-1]: # possible trailing -
291 yield ('symbol', parts[-1], s)
291 yield ('symbol', parts[-1], s)
292 else:
292 else:
293 yield ('symbol', sym, s)
293 yield ('symbol', sym, s)
294 pos -= 1
294 pos -= 1
295 else:
295 else:
296 raise error.ParseError(_("syntax error in revset '%s'") %
296 raise error.ParseError(_("syntax error in revset '%s'") %
297 program, pos)
297 program, pos)
298 pos += 1
298 pos += 1
299 yield ('end', None, pos)
299 yield ('end', None, pos)
300
300
301 # helpers
301 # helpers
302
302
303 def getsymbol(x):
303 def getsymbol(x):
304 if x and x[0] == 'symbol':
304 if x and x[0] == 'symbol':
305 return x[1]
305 return x[1]
306 raise error.ParseError(_('not a symbol'))
306 raise error.ParseError(_('not a symbol'))
307
307
308 def getstring(x, err):
308 def getstring(x, err):
309 if x and (x[0] == 'string' or x[0] == 'symbol'):
309 if x and (x[0] == 'string' or x[0] == 'symbol'):
310 return x[1]
310 return x[1]
311 raise error.ParseError(err)
311 raise error.ParseError(err)
312
312
313 def getlist(x):
313 def getlist(x):
314 if not x:
314 if not x:
315 return []
315 return []
316 if x[0] == 'list':
316 if x[0] == 'list':
317 return list(x[1:])
317 return list(x[1:])
318 return [x]
318 return [x]
319
319
320 def getargs(x, min, max, err):
320 def getargs(x, min, max, err):
321 l = getlist(x)
321 l = getlist(x)
322 if len(l) < min or (max >= 0 and len(l) > max):
322 if len(l) < min or (max >= 0 and len(l) > max):
323 raise error.ParseError(err)
323 raise error.ParseError(err)
324 return l
324 return l
325
325
326 def getargsdict(x, funcname, keys):
326 def getargsdict(x, funcname, keys):
327 return parser.buildargsdict(getlist(x), funcname, keys.split(),
327 return parser.buildargsdict(getlist(x), funcname, keys.split(),
328 keyvaluenode='keyvalue', keynode='symbol')
328 keyvaluenode='keyvalue', keynode='symbol')
329
329
330 def getset(repo, subset, x):
330 def getset(repo, subset, x):
331 if not x:
331 if not x:
332 raise error.ParseError(_("missing argument"))
332 raise error.ParseError(_("missing argument"))
333 s = methods[x[0]](repo, subset, *x[1:])
333 s = methods[x[0]](repo, subset, *x[1:])
334 if util.safehasattr(s, 'isascending'):
334 if util.safehasattr(s, 'isascending'):
335 return s
335 return s
336 # else case should not happen, because all non-func are internal,
336 # else case should not happen, because all non-func are internal,
337 # ignoring for now.
337 # ignoring for now.
338 if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
338 if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
339 repo.ui.deprecwarn('revset "%s" uses list instead of smartset'
339 repo.ui.deprecwarn('revset "%s" uses list instead of smartset'
340 % x[1][1],
340 % x[1][1],
341 '3.9')
341 '3.9')
342 return baseset(s)
342 return baseset(s)
343
343
344 def _getrevsource(repo, r):
344 def _getrevsource(repo, r):
345 extra = repo[r].extra()
345 extra = repo[r].extra()
346 for label in ('source', 'transplant_source', 'rebase_source'):
346 for label in ('source', 'transplant_source', 'rebase_source'):
347 if label in extra:
347 if label in extra:
348 try:
348 try:
349 return repo[extra[label]].rev()
349 return repo[extra[label]].rev()
350 except error.RepoLookupError:
350 except error.RepoLookupError:
351 pass
351 pass
352 return None
352 return None
353
353
354 # operator methods
354 # operator methods
355
355
356 def stringset(repo, subset, x):
356 def stringset(repo, subset, x):
357 x = repo[x].rev()
357 x = repo[x].rev()
358 if (x in subset
358 if (x in subset
359 or x == node.nullrev and isinstance(subset, fullreposet)):
359 or x == node.nullrev and isinstance(subset, fullreposet)):
360 return baseset([x])
360 return baseset([x])
361 return baseset()
361 return baseset()
362
362
363 def rangeset(repo, subset, x, y, order):
363 def rangeset(repo, subset, x, y, order):
364 m = getset(repo, fullreposet(repo), x)
364 m = getset(repo, fullreposet(repo), x)
365 n = getset(repo, fullreposet(repo), y)
365 n = getset(repo, fullreposet(repo), y)
366
366
367 if not m or not n:
367 if not m or not n:
368 return baseset()
368 return baseset()
369 m, n = m.first(), n.last()
369 m, n = m.first(), n.last()
370
370
371 if m == n:
371 if m == n:
372 r = baseset([m])
372 r = baseset([m])
373 elif n == node.wdirrev:
373 elif n == node.wdirrev:
374 r = spanset(repo, m, len(repo)) + baseset([n])
374 r = spanset(repo, m, len(repo)) + baseset([n])
375 elif m == node.wdirrev:
375 elif m == node.wdirrev:
376 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
376 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
377 elif m < n:
377 elif m < n:
378 r = spanset(repo, m, n + 1)
378 r = spanset(repo, m, n + 1)
379 else:
379 else:
380 r = spanset(repo, m, n - 1)
380 r = spanset(repo, m, n - 1)
381 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
381 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
382 # necessary to ensure we preserve the order in subset.
382 # necessary to ensure we preserve the order in subset.
383 #
383 #
384 # This has performance implication, carrying the sorting over when possible
384 # This has performance implication, carrying the sorting over when possible
385 # would be more efficient.
385 # would be more efficient.
386 return r & subset
386 return r & subset
387
387
388 def dagrange(repo, subset, x, y, order):
388 def dagrange(repo, subset, x, y, order):
389 r = fullreposet(repo)
389 r = fullreposet(repo)
390 xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
390 xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
391 includepath=True)
391 includepath=True)
392 return subset & xs
392 return subset & xs
393
393
394 def andset(repo, subset, x, y, order):
394 def andset(repo, subset, x, y, order):
395 return getset(repo, getset(repo, subset, x), y)
395 return getset(repo, getset(repo, subset, x), y)
396
396
397 def differenceset(repo, subset, x, y, order):
397 def differenceset(repo, subset, x, y, order):
398 return getset(repo, subset, x) - getset(repo, subset, y)
398 return getset(repo, subset, x) - getset(repo, subset, y)
399
399
400 def _orsetlist(repo, subset, xs):
400 def _orsetlist(repo, subset, xs):
401 assert xs
401 assert xs
402 if len(xs) == 1:
402 if len(xs) == 1:
403 return getset(repo, subset, xs[0])
403 return getset(repo, subset, xs[0])
404 p = len(xs) // 2
404 p = len(xs) // 2
405 a = _orsetlist(repo, subset, xs[:p])
405 a = _orsetlist(repo, subset, xs[:p])
406 b = _orsetlist(repo, subset, xs[p:])
406 b = _orsetlist(repo, subset, xs[p:])
407 return a + b
407 return a + b
408
408
409 def orset(repo, subset, x, order):
409 def orset(repo, subset, x, order):
410 return _orsetlist(repo, subset, getlist(x))
410 return _orsetlist(repo, subset, getlist(x))
411
411
412 def notset(repo, subset, x, order):
412 def notset(repo, subset, x, order):
413 return subset - getset(repo, subset, x)
413 return subset - getset(repo, subset, x)
414
414
415 def listset(repo, subset, *xs):
415 def listset(repo, subset, *xs):
416 raise error.ParseError(_("can't use a list in this context"),
416 raise error.ParseError(_("can't use a list in this context"),
417 hint=_('see hg help "revsets.x or y"'))
417 hint=_('see hg help "revsets.x or y"'))
418
418
419 def keyvaluepair(repo, subset, k, v):
419 def keyvaluepair(repo, subset, k, v):
420 raise error.ParseError(_("can't use a key-value pair in this context"))
420 raise error.ParseError(_("can't use a key-value pair in this context"))
421
421
422 def func(repo, subset, a, b, order):
422 def func(repo, subset, a, b, order):
423 f = getsymbol(a)
423 f = getsymbol(a)
424 if f in symbols:
424 if f in symbols:
425 return symbols[f](repo, subset, b)
425 fn = symbols[f]
426 if getattr(fn, '_takeorder', False):
427 return fn(repo, subset, b, order)
428 return fn(repo, subset, b)
426
429
427 keep = lambda fn: getattr(fn, '__doc__', None) is not None
430 keep = lambda fn: getattr(fn, '__doc__', None) is not None
428
431
429 syms = [s for (s, fn) in symbols.items() if keep(fn)]
432 syms = [s for (s, fn) in symbols.items() if keep(fn)]
430 raise error.UnknownIdentifier(f, syms)
433 raise error.UnknownIdentifier(f, syms)
431
434
432 # functions
435 # functions
433
436
434 # symbols are callables like:
437 # symbols are callables like:
435 # fn(repo, subset, x)
438 # fn(repo, subset, x)
436 # with:
439 # with:
437 # repo - current repository instance
440 # repo - current repository instance
438 # subset - of revisions to be examined
441 # subset - of revisions to be examined
439 # x - argument in tree form
442 # x - argument in tree form
440 symbols = {}
443 symbols = {}
441
444
442 # symbols which can't be used for a DoS attack for any given input
445 # symbols which can't be used for a DoS attack for any given input
443 # (e.g. those which accept regexes as plain strings shouldn't be included)
446 # (e.g. those which accept regexes as plain strings shouldn't be included)
444 # functions that just return a lot of changesets (like all) don't count here
447 # functions that just return a lot of changesets (like all) don't count here
445 safesymbols = set()
448 safesymbols = set()
446
449
447 predicate = registrar.revsetpredicate()
450 predicate = registrar.revsetpredicate()
448
451
449 @predicate('_destupdate')
452 @predicate('_destupdate')
450 def _destupdate(repo, subset, x):
453 def _destupdate(repo, subset, x):
451 # experimental revset for update destination
454 # experimental revset for update destination
452 args = getargsdict(x, 'limit', 'clean check')
455 args = getargsdict(x, 'limit', 'clean check')
453 return subset & baseset([destutil.destupdate(repo, **args)[0]])
456 return subset & baseset([destutil.destupdate(repo, **args)[0]])
454
457
455 @predicate('_destmerge')
458 @predicate('_destmerge')
456 def _destmerge(repo, subset, x):
459 def _destmerge(repo, subset, x):
457 # experimental revset for merge destination
460 # experimental revset for merge destination
458 sourceset = None
461 sourceset = None
459 if x is not None:
462 if x is not None:
460 sourceset = getset(repo, fullreposet(repo), x)
463 sourceset = getset(repo, fullreposet(repo), x)
461 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
464 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
462
465
463 @predicate('adds(pattern)', safe=True)
466 @predicate('adds(pattern)', safe=True)
464 def adds(repo, subset, x):
467 def adds(repo, subset, x):
465 """Changesets that add a file matching pattern.
468 """Changesets that add a file matching pattern.
466
469
467 The pattern without explicit kind like ``glob:`` is expected to be
470 The pattern without explicit kind like ``glob:`` is expected to be
468 relative to the current directory and match against a file or a
471 relative to the current directory and match against a file or a
469 directory.
472 directory.
470 """
473 """
471 # i18n: "adds" is a keyword
474 # i18n: "adds" is a keyword
472 pat = getstring(x, _("adds requires a pattern"))
475 pat = getstring(x, _("adds requires a pattern"))
473 return checkstatus(repo, subset, pat, 1)
476 return checkstatus(repo, subset, pat, 1)
474
477
475 @predicate('ancestor(*changeset)', safe=True)
478 @predicate('ancestor(*changeset)', safe=True)
476 def ancestor(repo, subset, x):
479 def ancestor(repo, subset, x):
477 """A greatest common ancestor of the changesets.
480 """A greatest common ancestor of the changesets.
478
481
479 Accepts 0 or more changesets.
482 Accepts 0 or more changesets.
480 Will return empty list when passed no args.
483 Will return empty list when passed no args.
481 Greatest common ancestor of a single changeset is that changeset.
484 Greatest common ancestor of a single changeset is that changeset.
482 """
485 """
483 # i18n: "ancestor" is a keyword
486 # i18n: "ancestor" is a keyword
484 l = getlist(x)
487 l = getlist(x)
485 rl = fullreposet(repo)
488 rl = fullreposet(repo)
486 anc = None
489 anc = None
487
490
488 # (getset(repo, rl, i) for i in l) generates a list of lists
491 # (getset(repo, rl, i) for i in l) generates a list of lists
489 for revs in (getset(repo, rl, i) for i in l):
492 for revs in (getset(repo, rl, i) for i in l):
490 for r in revs:
493 for r in revs:
491 if anc is None:
494 if anc is None:
492 anc = repo[r]
495 anc = repo[r]
493 else:
496 else:
494 anc = anc.ancestor(repo[r])
497 anc = anc.ancestor(repo[r])
495
498
496 if anc is not None and anc.rev() in subset:
499 if anc is not None and anc.rev() in subset:
497 return baseset([anc.rev()])
500 return baseset([anc.rev()])
498 return baseset()
501 return baseset()
499
502
500 def _ancestors(repo, subset, x, followfirst=False):
503 def _ancestors(repo, subset, x, followfirst=False):
501 heads = getset(repo, fullreposet(repo), x)
504 heads = getset(repo, fullreposet(repo), x)
502 if not heads:
505 if not heads:
503 return baseset()
506 return baseset()
504 s = _revancestors(repo, heads, followfirst)
507 s = _revancestors(repo, heads, followfirst)
505 return subset & s
508 return subset & s
506
509
507 @predicate('ancestors(set)', safe=True)
510 @predicate('ancestors(set)', safe=True)
508 def ancestors(repo, subset, x):
511 def ancestors(repo, subset, x):
509 """Changesets that are ancestors of a changeset in set.
512 """Changesets that are ancestors of a changeset in set.
510 """
513 """
511 return _ancestors(repo, subset, x)
514 return _ancestors(repo, subset, x)
512
515
513 @predicate('_firstancestors', safe=True)
516 @predicate('_firstancestors', safe=True)
514 def _firstancestors(repo, subset, x):
517 def _firstancestors(repo, subset, x):
515 # ``_firstancestors(set)``
518 # ``_firstancestors(set)``
516 # Like ``ancestors(set)`` but follows only the first parents.
519 # Like ``ancestors(set)`` but follows only the first parents.
517 return _ancestors(repo, subset, x, followfirst=True)
520 return _ancestors(repo, subset, x, followfirst=True)
518
521
519 def ancestorspec(repo, subset, x, n, order):
522 def ancestorspec(repo, subset, x, n, order):
520 """``set~n``
523 """``set~n``
521 Changesets that are the Nth ancestor (first parents only) of a changeset
524 Changesets that are the Nth ancestor (first parents only) of a changeset
522 in set.
525 in set.
523 """
526 """
524 try:
527 try:
525 n = int(n[1])
528 n = int(n[1])
526 except (TypeError, ValueError):
529 except (TypeError, ValueError):
527 raise error.ParseError(_("~ expects a number"))
530 raise error.ParseError(_("~ expects a number"))
528 ps = set()
531 ps = set()
529 cl = repo.changelog
532 cl = repo.changelog
530 for r in getset(repo, fullreposet(repo), x):
533 for r in getset(repo, fullreposet(repo), x):
531 for i in range(n):
534 for i in range(n):
532 r = cl.parentrevs(r)[0]
535 r = cl.parentrevs(r)[0]
533 ps.add(r)
536 ps.add(r)
534 return subset & ps
537 return subset & ps
535
538
536 @predicate('author(string)', safe=True)
539 @predicate('author(string)', safe=True)
537 def author(repo, subset, x):
540 def author(repo, subset, x):
538 """Alias for ``user(string)``.
541 """Alias for ``user(string)``.
539 """
542 """
540 # i18n: "author" is a keyword
543 # i18n: "author" is a keyword
541 n = encoding.lower(getstring(x, _("author requires a string")))
544 n = encoding.lower(getstring(x, _("author requires a string")))
542 kind, pattern, matcher = _substringmatcher(n)
545 kind, pattern, matcher = _substringmatcher(n)
543 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())),
546 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())),
544 condrepr=('<user %r>', n))
547 condrepr=('<user %r>', n))
545
548
546 @predicate('bisect(string)', safe=True)
549 @predicate('bisect(string)', safe=True)
547 def bisect(repo, subset, x):
550 def bisect(repo, subset, x):
548 """Changesets marked in the specified bisect status:
551 """Changesets marked in the specified bisect status:
549
552
550 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
553 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
551 - ``goods``, ``bads`` : csets topologically good/bad
554 - ``goods``, ``bads`` : csets topologically good/bad
552 - ``range`` : csets taking part in the bisection
555 - ``range`` : csets taking part in the bisection
553 - ``pruned`` : csets that are goods, bads or skipped
556 - ``pruned`` : csets that are goods, bads or skipped
554 - ``untested`` : csets whose fate is yet unknown
557 - ``untested`` : csets whose fate is yet unknown
555 - ``ignored`` : csets ignored due to DAG topology
558 - ``ignored`` : csets ignored due to DAG topology
556 - ``current`` : the cset currently being bisected
559 - ``current`` : the cset currently being bisected
557 """
560 """
558 # i18n: "bisect" is a keyword
561 # i18n: "bisect" is a keyword
559 status = getstring(x, _("bisect requires a string")).lower()
562 status = getstring(x, _("bisect requires a string")).lower()
560 state = set(hbisect.get(repo, status))
563 state = set(hbisect.get(repo, status))
561 return subset & state
564 return subset & state
562
565
563 # Backward-compatibility
566 # Backward-compatibility
564 # - no help entry so that we do not advertise it any more
567 # - no help entry so that we do not advertise it any more
565 @predicate('bisected', safe=True)
568 @predicate('bisected', safe=True)
566 def bisected(repo, subset, x):
569 def bisected(repo, subset, x):
567 return bisect(repo, subset, x)
570 return bisect(repo, subset, x)
568
571
569 @predicate('bookmark([name])', safe=True)
572 @predicate('bookmark([name])', safe=True)
570 def bookmark(repo, subset, x):
573 def bookmark(repo, subset, x):
571 """The named bookmark or all bookmarks.
574 """The named bookmark or all bookmarks.
572
575
573 If `name` starts with `re:`, the remainder of the name is treated as
576 If `name` starts with `re:`, the remainder of the name is treated as
574 a regular expression. To match a bookmark that actually starts with `re:`,
577 a regular expression. To match a bookmark that actually starts with `re:`,
575 use the prefix `literal:`.
578 use the prefix `literal:`.
576 """
579 """
577 # i18n: "bookmark" is a keyword
580 # i18n: "bookmark" is a keyword
578 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
581 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
579 if args:
582 if args:
580 bm = getstring(args[0],
583 bm = getstring(args[0],
581 # i18n: "bookmark" is a keyword
584 # i18n: "bookmark" is a keyword
582 _('the argument to bookmark must be a string'))
585 _('the argument to bookmark must be a string'))
583 kind, pattern, matcher = util.stringmatcher(bm)
586 kind, pattern, matcher = util.stringmatcher(bm)
584 bms = set()
587 bms = set()
585 if kind == 'literal':
588 if kind == 'literal':
586 bmrev = repo._bookmarks.get(pattern, None)
589 bmrev = repo._bookmarks.get(pattern, None)
587 if not bmrev:
590 if not bmrev:
588 raise error.RepoLookupError(_("bookmark '%s' does not exist")
591 raise error.RepoLookupError(_("bookmark '%s' does not exist")
589 % pattern)
592 % pattern)
590 bms.add(repo[bmrev].rev())
593 bms.add(repo[bmrev].rev())
591 else:
594 else:
592 matchrevs = set()
595 matchrevs = set()
593 for name, bmrev in repo._bookmarks.iteritems():
596 for name, bmrev in repo._bookmarks.iteritems():
594 if matcher(name):
597 if matcher(name):
595 matchrevs.add(bmrev)
598 matchrevs.add(bmrev)
596 if not matchrevs:
599 if not matchrevs:
597 raise error.RepoLookupError(_("no bookmarks exist"
600 raise error.RepoLookupError(_("no bookmarks exist"
598 " that match '%s'") % pattern)
601 " that match '%s'") % pattern)
599 for bmrev in matchrevs:
602 for bmrev in matchrevs:
600 bms.add(repo[bmrev].rev())
603 bms.add(repo[bmrev].rev())
601 else:
604 else:
602 bms = set([repo[r].rev()
605 bms = set([repo[r].rev()
603 for r in repo._bookmarks.values()])
606 for r in repo._bookmarks.values()])
604 bms -= set([node.nullrev])
607 bms -= set([node.nullrev])
605 return subset & bms
608 return subset & bms
606
609
607 @predicate('branch(string or set)', safe=True)
610 @predicate('branch(string or set)', safe=True)
608 def branch(repo, subset, x):
611 def branch(repo, subset, x):
609 """
612 """
610 All changesets belonging to the given branch or the branches of the given
613 All changesets belonging to the given branch or the branches of the given
611 changesets.
614 changesets.
612
615
613 If `string` starts with `re:`, the remainder of the name is treated as
616 If `string` starts with `re:`, the remainder of the name is treated as
614 a regular expression. To match a branch that actually starts with `re:`,
617 a regular expression. To match a branch that actually starts with `re:`,
615 use the prefix `literal:`.
618 use the prefix `literal:`.
616 """
619 """
617 getbi = repo.revbranchcache().branchinfo
620 getbi = repo.revbranchcache().branchinfo
618
621
619 try:
622 try:
620 b = getstring(x, '')
623 b = getstring(x, '')
621 except error.ParseError:
624 except error.ParseError:
622 # not a string, but another revspec, e.g. tip()
625 # not a string, but another revspec, e.g. tip()
623 pass
626 pass
624 else:
627 else:
625 kind, pattern, matcher = util.stringmatcher(b)
628 kind, pattern, matcher = util.stringmatcher(b)
626 if kind == 'literal':
629 if kind == 'literal':
627 # note: falls through to the revspec case if no branch with
630 # note: falls through to the revspec case if no branch with
628 # this name exists and pattern kind is not specified explicitly
631 # this name exists and pattern kind is not specified explicitly
629 if pattern in repo.branchmap():
632 if pattern in repo.branchmap():
630 return subset.filter(lambda r: matcher(getbi(r)[0]),
633 return subset.filter(lambda r: matcher(getbi(r)[0]),
631 condrepr=('<branch %r>', b))
634 condrepr=('<branch %r>', b))
632 if b.startswith('literal:'):
635 if b.startswith('literal:'):
633 raise error.RepoLookupError(_("branch '%s' does not exist")
636 raise error.RepoLookupError(_("branch '%s' does not exist")
634 % pattern)
637 % pattern)
635 else:
638 else:
636 return subset.filter(lambda r: matcher(getbi(r)[0]),
639 return subset.filter(lambda r: matcher(getbi(r)[0]),
637 condrepr=('<branch %r>', b))
640 condrepr=('<branch %r>', b))
638
641
639 s = getset(repo, fullreposet(repo), x)
642 s = getset(repo, fullreposet(repo), x)
640 b = set()
643 b = set()
641 for r in s:
644 for r in s:
642 b.add(getbi(r)[0])
645 b.add(getbi(r)[0])
643 c = s.__contains__
646 c = s.__contains__
644 return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
647 return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
645 condrepr=lambda: '<branch %r>' % sorted(b))
648 condrepr=lambda: '<branch %r>' % sorted(b))
646
649
647 @predicate('bumped()', safe=True)
650 @predicate('bumped()', safe=True)
648 def bumped(repo, subset, x):
651 def bumped(repo, subset, x):
649 """Mutable changesets marked as successors of public changesets.
652 """Mutable changesets marked as successors of public changesets.
650
653
651 Only non-public and non-obsolete changesets can be `bumped`.
654 Only non-public and non-obsolete changesets can be `bumped`.
652 """
655 """
653 # i18n: "bumped" is a keyword
656 # i18n: "bumped" is a keyword
654 getargs(x, 0, 0, _("bumped takes no arguments"))
657 getargs(x, 0, 0, _("bumped takes no arguments"))
655 bumped = obsmod.getrevs(repo, 'bumped')
658 bumped = obsmod.getrevs(repo, 'bumped')
656 return subset & bumped
659 return subset & bumped
657
660
658 @predicate('bundle()', safe=True)
661 @predicate('bundle()', safe=True)
659 def bundle(repo, subset, x):
662 def bundle(repo, subset, x):
660 """Changesets in the bundle.
663 """Changesets in the bundle.
661
664
662 Bundle must be specified by the -R option."""
665 Bundle must be specified by the -R option."""
663
666
664 try:
667 try:
665 bundlerevs = repo.changelog.bundlerevs
668 bundlerevs = repo.changelog.bundlerevs
666 except AttributeError:
669 except AttributeError:
667 raise error.Abort(_("no bundle provided - specify with -R"))
670 raise error.Abort(_("no bundle provided - specify with -R"))
668 return subset & bundlerevs
671 return subset & bundlerevs
669
672
def checkstatus(repo, subset, pat, field):
    hasset = matchmod.patkind(pat) == 'set'

    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))

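# Editor's note (illustrative sketch, not part of the original module):
# checkstatus() is the shared helper behind the status-based predicates; the
# 'field' argument indexes the tuple returned by repo.status(), so index 0
# selects modified files, which is what modifies() below passes. Assuming a
# localrepository object 'repo' is already available, such a predicate is
# typically evaluated like this:
#
#     for r in repo.revs("modifies('mercurial/revset.py')"):
#         print r  # revisions whose status lists the file as modified
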
def _children(repo, subset, parentset):
    if not parentset:
        return baseset()
    cs = set()
    pr = repo.changelog.parentrevs
    minrev = parentset.min()
    for r in subset:
        if r <= minrev:
            continue
        for p in pr(r):
            if p in parentset:
                cs.add(r)
    return baseset(cs)

@predicate('children(set)', safe=True)
def children(repo, subset, x):
    """Child changesets of changesets in set.
    """
    s = getset(repo, fullreposet(repo), x)
    cs = _children(repo, subset, s)
    return subset & cs

@predicate('closed()', safe=True)
def closed(repo, subset, x):
    """Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    return subset.filter(lambda r: repo[r].closesbranch(),
                         condrepr='<branch closed>')

@predicate('contains(pattern)')
def contains(repo, subset, x):
    """The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches, condrepr=('<contains %r>', pat))

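# Editor's note (illustrative sketch, not part of the original module):
# contains() inspects the manifest, so it selects revisions in which a file
# merely exists, while file()/modifies() select revisions that touch it.
# Assuming a localrepository object 'repo':
#
#     present = repo.revs("contains('mercurial/revset.py')")  # file exists
#     touched = repo.revs("file('mercurial/revset.py')")      # file changed
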
@predicate('converted([id])', safe=True)
def converted(repo, subset, x):
    """Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        source = repo[r].extra().get('convert_revision', None)
        return source is not None and (rev is None or source.startswith(rev))

    return subset.filter(lambda r: _matchvalue(r),
                         condrepr=('<converted %r>', rev))

@predicate('date(interval)', safe=True)
def date(repo, subset, x):
    """Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    dm = util.matchdate(ds)
    return subset.filter(lambda x: dm(repo[x].date()[0]),
                         condrepr=('<date %r>', ds))

@predicate('desc(string)', safe=True)
def desc(repo, subset, x):
    """Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    ds = encoding.lower(getstring(x, _("desc requires a string")))

    def matches(x):
        c = repo[x]
        return ds in encoding.lower(c.description())

    return subset.filter(matches, condrepr=('<desc %r>', ds))

def _descendants(repo, subset, x, followfirst=False):
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        result = subset & result
    return result

@predicate('descendants(set)', safe=True)
def descendants(repo, subset, x):
    """Changesets which are descendants of changesets in set.
    """
    return _descendants(repo, subset, x)

@predicate('_firstdescendants', safe=True)
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    return _descendants(repo, subset, x, followfirst=True)

@predicate('destination([set])', safe=True)
def destination(repo, subset, x):
    """Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source. Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be. Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set. Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset. Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__,
                         condrepr=lambda: '<destination %r>' % sorted(dests))

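# Editor's note (illustrative sketch, not part of the original module):
# destination() walks each candidate revision's recorded graft/transplant/
# rebase source backwards until it reaches the argument set (or runs out of
# sources); origin(), defined later in this file, answers the reverse
# question. Assuming a localrepository object 'repo':
#
#     copied_from_stable = repo.revs("destination(branch(stable))")
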
@predicate('divergent()', safe=True)
def divergent(repo, subset, x):
    """
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    divergent = obsmod.getrevs(repo, 'divergent')
    return subset & divergent

@predicate('extinct()', safe=True)
def extinct(repo, subset, x):
    """Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    extincts = obsmod.getrevs(repo, 'extinct')
    return subset & extincts

@predicate('extra(label, [value])', safe=True)
def extra(repo, subset, x):
    """Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        kind, value, matcher = util.stringmatcher(value)

    def _matchvalue(r):
        extra = repo[r].extra()
        return label in extra and (value is None or matcher(extra[label]))

    return subset.filter(lambda r: _matchvalue(r),
                         condrepr=('<extra[%r] %r>', label, value))

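# Editor's note (illustrative sketch, not part of the original module):
# extra() filters on the changeset's extra metadata dictionary; the optional
# value goes through util.stringmatcher(), so 're:' and 'literal:' prefixes
# behave as documented above. Assuming a localrepository object 'repo':
#
#     repo.revs("extra('branch', 'default')")
#     repo.revs("extra('convert_revision', 're:^svn:')")
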
@predicate('filelog(pattern)', safe=True)
def filelog(repo, subset, x):
    """Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        fl = repo.file(f)
        known = {}
        scanpos = 0
        for fr in list(fl):
            fn = fl.node(fr)
            if fn in known:
                s.add(known[fn])
                continue

            lr = fl.linkrev(fr)
            if lr in cl:
                s.add(lr)
            elif scanpos is not None:
                # lowest matching changeset is filtered, scan further
                # ahead in changelog
                start = max(lr, scanpos) + 1
                scanpos = None
                for r in cl.revs(start):
                    # minimize parsing of non-matching entries
                    if f in cl.revision(r) and f in cl.readfiles(r):
                        try:
                            # try to use manifest delta fastpath
                            n = repo[r].filenode(f)
                            if n not in known:
                                if n == fn:
                                    s.add(r)
                                    scanpos = r
                                    break
                                else:
                                    known[n] = r
                        except error.ManifestLookupError:
                            # deletion in changelog
                            continue

    return subset & s

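# Editor's note (illustrative sketch, not part of the original module):
# filelog() trades accuracy for speed by reading only the per-file revlog;
# the linkrev scan above is the workaround for filtered changesets mentioned
# in the docstring. Assuming a localrepository object 'repo', the fast and the
# accurate predicates may return different sets:
#
#     fast = repo.revs("filelog('mercurial/revset.py')")  # may miss deletes
#     slow = repo.revs("file('mercurial/revset.py')")     # accurate, slower
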
@predicate('first(set, [n])', safe=True)
def first(repo, subset, x):
    """An alias for limit().
    """
    return limit(repo, subset, x)

def _follow(repo, subset, x, name, followfirst=False):
    l = getargs(x, 0, 2, _("%s takes no arguments or a pattern "
                           "and an optional revset") % name)
    c = repo['.']
    if l:
        x = getstring(l[0], _("%s expected a pattern") % name)
        rev = None
        if len(l) >= 2:
            rev = getset(repo, fullreposet(repo), l[1]).last()
            if rev is None:
                raise error.RepoLookupError(
                    _("%s: starting revision set cannot be empty") % name)
            c = repo[rev]
        matcher = matchmod.match(repo.root, repo.getcwd(), [x],
                                 ctx=repo[rev], default='path')

        files = c.manifest().walk(matcher)

        s = set()
        for fname in files:
            fctx = c[fname]
            s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
            # include the revision responsible for the most recent version
            s.add(fctx.introrev())
    else:
        s = _revancestors(repo, baseset([c.rev()]), followfirst)

    return subset & s

@predicate('follow([pattern[, startrev]])', safe=True)
def follow(repo, subset, x):
    """
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If pattern is specified, the histories of files matching given
    pattern in the revision given by startrev are followed, including copies.
    """
    return _follow(repo, subset, x, 'follow')

@predicate('_followfirst', safe=True)
def _followfirst(repo, subset, x):
    # ``followfirst([pattern[, startrev]])``
    # Like ``follow([pattern[, startrev]])`` but follows only the first parent
    # of every revision or file revision.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)

@predicate('all()', safe=True)
def getall(repo, subset, x):
    """All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    return subset & spanset(repo) # drop "null" if any

@predicate('grep(regex)')
def grep(repo, subset, x):
    """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        gr = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(x):
        c = repo[x]
        for e in c.files() + [c.user(), c.description()]:
            if gr.search(e):
                return True
        return False

    return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))

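# Editor's note (illustrative sketch, not part of the original module):
# grep() compiles its argument with re.compile() and searches file names,
# user and description case-sensitively, whereas keyword() below does a
# case-insensitive substring search over the same fields. Assuming a
# localrepository object 'repo':
#
#     repo.revs(r"grep(r'issue\d+')")  # regex, case-sensitive
#     repo.revs("keyword('issue')")    # substring, case-insensitive
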
@predicate('_matchfiles', safe=True)
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        s = getstring(arg, "_matchfiles requires string arguments")
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'revision')
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'default mode')
            default = value
        else:
            raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    # This directly reads the changelog data, as creating a changectx for all
    # revisions is quite expensive.
    getfiles = repo.changelog.readfiles
    wdirrev = node.wdirrev
    def matches(x):
        if x == wdirrev:
            files = repo[x].files()
        else:
            files = getfiles(x)
        for f in files:
            if m(f):
                return True
        return False

    return subset.filter(matches,
                         condrepr=('<matchfiles patterns=%r, include=%r '
                                   'exclude=%r, default=%r, rev=%r>',
                                   pats, inc, exc, default, rev))

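# Editor's note (illustrative sketch, not part of the original module):
# _matchfiles() is the internal worker that file() compiles down to: the
# prefixed string arguments map directly onto the matchmod.match() keyword
# arguments built above. Roughly, assuming a localrepository object 'repo':
#
#     repo.revs("file('glob:**.py')")
#     # is roughly equivalent to the internal form
#     repo.revs("_matchfiles('p:glob:**.py')")
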
@predicate('file(pattern)', safe=True)
def hasfile(repo, subset, x):
    """Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    return _matchfiles(repo, subset, ('string', 'p:' + pat))

@predicate('head()', safe=True)
def head(repo, subset, x):
    """Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    hs = set()
    cl = repo.changelog
    for ls in repo.branchmap().itervalues():
        hs.update(cl.rev(h) for h in ls)
    return subset & baseset(hs)

@predicate('heads(set)', safe=True)
def heads(repo, subset, x):
    """Members of set with no children in set.
    """
    s = getset(repo, subset, x)
    ps = parents(repo, subset, x)
    return s - ps

@predicate('hidden()', safe=True)
def hidden(repo, subset, x):
    """Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    hiddenrevs = repoview.filterrevs(repo, 'visible')
    return subset & hiddenrevs

@predicate('keyword(string)', safe=True)
def keyword(repo, subset, x):
    """Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        c = repo[r]
        return any(kw in encoding.lower(t)
                   for t in c.files() + [c.user(), c.description()])

    return subset.filter(matches, condrepr=('<keyword %r>', kw))

@predicate('limit(set[, n[, offset]])', safe=True)
def limit(repo, subset, x):
    """First n members of set, defaulting to 1, starting from offset.
    """
    args = getargsdict(x, 'limit', 'set n offset')
    if 'set' not in args:
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit requires one to three arguments"))
    try:
        lim, ofs = 1, 0
        if 'n' in args:
            # i18n: "limit" is a keyword
            lim = int(getstring(args['n'], _("limit requires a number")))
        if 'offset' in args:
            # i18n: "limit" is a keyword
            ofs = int(getstring(args['offset'], _("limit requires a number")))
        if ofs < 0:
            raise error.ParseError(_("negative offset"))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    os = getset(repo, fullreposet(repo), args['set'])
    result = []
    it = iter(os)
    for x in xrange(ofs):
        y = next(it, None)
        if y is None:
            break
    for x in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
                                     lim, ofs, subset, os))

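# Editor's note (illustrative sketch, not part of the original module):
# limit() walks the input set in its own iteration order, skips 'offset'
# elements, then keeps up to 'n' elements that are also in the subset being
# filtered; first() above is a plain alias. Assuming a localrepository
# object 'repo':
#
#     repo.revs("limit(sort(all(), '-rev'), 3)")  # three highest revisions
#     repo.revs("limit(all(), 5, 10)")            # revisions 10..14, if present
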
@predicate('last(set, [n])', safe=True)
def last(repo, subset, x):
    """Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
    except (TypeError, ValueError):
        # i18n: "last" is a keyword
        raise error.ParseError(_("last expects a number"))
    os = getset(repo, fullreposet(repo), l[0])
    os.reverse()
    result = []
    it = iter(os)
    for x in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))

@predicate('max(set)', safe=True)
def maxrev(repo, subset, x):
    """Changeset with highest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.max()
        if m in subset:
            return baseset([m], datarepr=('<max %r, %r>', subset, os))
    except ValueError:
        # os.max() throws a ValueError when the collection is empty.
        # Same as python's max().
        pass
    return baseset(datarepr=('<max %r, %r>', subset, os))

@predicate('merge()', safe=True)
def merge(repo, subset, x):
    """Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    cl = repo.changelog
    return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
                         condrepr='<merge>')

@predicate('branchpoint()', safe=True)
def branchpoint(repo, subset, x):
    """Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    parentscount = [0]*(len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                parentscount[p - baserev] += 1
    return subset.filter(lambda r: parentscount[r - baserev] > 1,
                         condrepr='<branchpoint>')

@predicate('min(set)', safe=True)
def minrev(repo, subset, x):
    """Changeset with lowest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.min()
        if m in subset:
            return baseset([m], datarepr=('<min %r, %r>', subset, os))
    except ValueError:
        # os.min() throws a ValueError when the collection is empty.
        # Same as python's min().
        pass
    return baseset(datarepr=('<min %r, %r>', subset, os))

@predicate('modifies(pattern)', safe=True)
def modifies(repo, subset, x):
    """Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    return checkstatus(repo, subset, pat, 0)

@predicate('named(namespace)')
def named(repo, subset, x):
    """The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = util.stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that matches '%s'") % pattern)

    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    names -= set([node.nullrev])
    return subset & names

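# Editor's note (illustrative sketch, not part of the original module):
# named() resolves names through repo.names, so it covers bookmarks, tags,
# branches and any namespace registered by an extension. Assuming a
# localrepository object 'repo':
#
#     repo.revs("named('bookmarks')")    # every bookmarked revision
#     repo.revs("named('re:branch.*')")  # namespaces matching a regex
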
@predicate('id(string)', safe=True)
def node_(repo, subset, x):
    """Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    if len(n) == 40:
        try:
            rn = repo.changelog.rev(node.bin(n))
        except (LookupError, TypeError):
            rn = None
    else:
        rn = None
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    result = baseset([rn])
    return result & subset

@predicate('obsolete()', safe=True)
def obsolete(repo, subset, x):
    """Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    obsoletes = obsmod.getrevs(repo, 'obsolete')
    return subset & obsoletes

@predicate('only(set, [set])', safe=True)
def only(repo, subset, x):
    """Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if not rev in descendants and not rev in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & results

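# Editor's note (illustrative sketch, not part of the original module):
# with two arguments, only(a, b) is the set '::a - ::b' computed through
# changelog.findmissingrevs(); with one argument the excluded set defaults to
# every other head of the repository. Assuming a localrepository object
# 'repo' with the usual named branches:
#
#     repo.revs("only(default, stable)")  # ancestors of default not in ::stable
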
@predicate('origin([set])', safe=True)
def origin(repo, subset, x):
    """
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, fullreposet(repo), x)
    else:
        dests = fullreposet(repo)

    def _firstsrc(rev):
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        while True:
            prev = _getrevsource(repo, src)

            if prev is None:
                return src
            src = prev

    o = set([_firstsrc(r) for r in dests])
    o -= set([None])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & o

@predicate('outgoing([path])', safe=True)
def outgoing(repo, subset, x):
    """Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = set([cl.rev(r) for r in outgoing.missing])
    return subset & o

@predicate('p1([set])', safe=True)
def p1(repo, subset, x):
    """First parent of changesets in set, or the working directory.
    """
    if x is None:
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        ps.add(cl.parentrevs(r)[0])
    ps -= set([node.nullrev])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps

@predicate('p2([set])', safe=True)
def p2(repo, subset, x):
    """Second parent of changesets in set, or the working directory.
    """
    if x is None:
        ps = repo[x].parents()
        try:
            p = ps[1].rev()
            if p >= 0:
                return subset & baseset([p])
            return baseset()
        except IndexError:
            return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        ps.add(cl.parentrevs(r)[1])
    ps -= set([node.nullrev])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps

1531 def parentpost(repo, subset, x, order):
1534 def parentpost(repo, subset, x, order):
1532 return p1(repo, subset, x)
1535 return p1(repo, subset, x)
1533
1536
1534 @predicate('parents([set])', safe=True)
1537 @predicate('parents([set])', safe=True)
1535 def parents(repo, subset, x):
1538 def parents(repo, subset, x):
1536 """
1539 """
1537 The set of all parents for all changesets in set, or the working directory.
1540 The set of all parents for all changesets in set, or the working directory.
1538 """
1541 """
1539 if x is None:
1542 if x is None:
1540 ps = set(p.rev() for p in repo[x].parents())
1543 ps = set(p.rev() for p in repo[x].parents())
1541 else:
1544 else:
1542 ps = set()
1545 ps = set()
1543 cl = repo.changelog
1546 cl = repo.changelog
1544 up = ps.update
1547 up = ps.update
1545 parentrevs = cl.parentrevs
1548 parentrevs = cl.parentrevs
1546 for r in getset(repo, fullreposet(repo), x):
1549 for r in getset(repo, fullreposet(repo), x):
1547 if r == node.wdirrev:
1550 if r == node.wdirrev:
1548 up(p.rev() for p in repo[r].parents())
1551 up(p.rev() for p in repo[r].parents())
1549 else:
1552 else:
1550 up(parentrevs(r))
1553 up(parentrevs(r))
1551 ps -= set([node.nullrev])
1554 ps -= set([node.nullrev])
1552 return subset & ps
1555 return subset & ps
1553
1556
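# Illustrative sketch (added for this write-up, not part of revset.py): the
# parentrevs-based collection used by p1()/p2()/parents() above, on plain
# data. changelog.parentrevs() always returns a pair, padding with nullrev
# (-1) when a parent is missing, which is why the predicates subtract it.
# 'parentrevs' below is a hypothetical stand-in for the changelog.
#
#   >>> parentrevs = {2: (0, -1), 3: (1, 2)}
#   >>> p2s = set(parentrevs[r][1] for r in [2, 3])
#   >>> sorted(p2s - set([-1]))    # nullrev placeholders are dropped
#   [2]
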
def _phase(repo, subset, target):
    """helper to select all revs in phase <target>"""
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
        s = baseset(s)
        s.sort() # sets are unordered, so we enforce ascending order
        return subset & s
    else:
        phase = repo._phasecache.phase
        condition = lambda r: phase(repo, r) == target
        return subset.filter(condition, condrepr=('<phase %r>', target),
                             cache=False)

@predicate('draft()', safe=True)
def draft(repo, subset, x):
    """Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    target = phases.draft
    return _phase(repo, subset, target)

@predicate('secret()', safe=True)
def secret(repo, subset, x):
    """Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    target = phases.secret
    return _phase(repo, subset, target)

def parentspec(repo, subset, x, n, order):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            ps.add(r)
        elif n == 1:
            ps.add(cl.parentrevs(r)[0])
        elif n == 2:
            parents = cl.parentrevs(r)
            if len(parents) > 1:
                ps.add(parents[1])
    return subset & ps

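# Illustrative sketch (added for this write-up, not part of revset.py): the
# core of the '^n' lookup in parentspec() above, reduced to plain data.
# 'parentrevs' is a hypothetical stand-in for cl.parentrevs(); revision 4 is
# a merge of 2 and 3, so '4^1' is 2, '4^2' is 3 and '4^0' is 4 itself.
#
#   >>> parentrevs = {4: (2, 3), 3: (1, -1), 2: (1, -1), 1: (0, -1)}
#   >>> def nthparent(r, n):
#   ...     return r if n == 0 else parentrevs[r][n - 1]
#   >>> nthparent(4, 1), nthparent(4, 2), nthparent(4, 0)
#   (2, 3, 4)
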
@predicate('present(set)', safe=True)
def present(repo, subset, x):
    """An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        return baseset()

# for internal use
@predicate('_notpublic', safe=True)
def _notpublic(repo, subset, x):
    getargs(x, 0, 0, "_notpublic takes no arguments")
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        s = set()
        for u in repo._phasecache._phasesets[1:]:
            s.update(u)
        s = baseset(s - repo.changelog.filteredrevs)
        s.sort()
        return subset & s
    else:
        phase = repo._phasecache.phase
        target = phases.public
        condition = lambda r: phase(repo, r) != target
        return subset.filter(condition, condrepr=('<phase %r>', target),
                             cache=False)

@predicate('public()', safe=True)
def public(repo, subset, x):
    """Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    phase = repo._phasecache.phase
    target = phases.public
    condition = lambda r: phase(repo, r) == target
    return subset.filter(condition, condrepr=('<phase %r>', target),
                         cache=False)

@predicate('remote([id [,path]])', safe=True)
def remote(repo, subset, x):
    """Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()

@predicate('removes(pattern)', safe=True)
def removes(repo, subset, x):
    """Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _("removes requires a pattern"))
    return checkstatus(repo, subset, pat, 2)

@predicate('rev(number)', safe=True)
def rev(repo, subset, x):
    """Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    l = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        l = int(getstring(l[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    if l not in repo.changelog and l != node.nullrev:
        return baseset()
    return subset & baseset([l])

@predicate('matching(revision [, field])', safe=True)
def matching(repo, subset, x):
    """Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
                  'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True))
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
            if match:
                return True
        return False

    return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))

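# Illustrative sketch (added for this write-up, not part of revset.py): the
# cost-based field ordering used by matching() above. The list mirrors
# 'fieldorder' and the key function mirrors 'fieldkeyfunc'; 'xyzzy' is a
# made-up field name standing in for anything unknown (the real code rejects
# unknown fields later, but the key function just treats them as costly).
#
#   >>> fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
#   ...               'files', 'description', 'substate', 'diff']
#   >>> cost = lambda f: fieldorder.index(f) if f in fieldorder else len(fieldorder)
#   >>> sorted(['diff', 'user', 'xyzzy', 'date'], key=cost)
#   ['user', 'date', 'diff', 'xyzzy']
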
@predicate('reverse(set)', safe=True)
def reverse(repo, subset, x):
    """Reverse order of set.
    """
    l = getset(repo, subset, x)
    l.reverse()
    return l

@predicate('roots(set)', safe=True)
def roots(repo, subset, x):
    """Changesets in set with no parent changeset in set.
    """
    s = getset(repo, fullreposet(repo), x)
    parents = repo.changelog.parentrevs
    def filter(r):
        for p in parents(r):
            if 0 <= p and p in s:
                return False
        return True
    return subset & s.filter(filter, condrepr='<roots>')

_sortkeyfuncs = {
    'rev': lambda c: c.rev(),
    'branch': lambda c: c.branch(),
    'desc': lambda c: c.description(),
    'user': lambda c: c.user(),
    'author': lambda c: c.user(),
    'date': lambda c: c.date()[0],
}

def _getsortargs(x):
    """Parse sort options into (set, [(key, reverse)], opts)"""
    args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
    if 'set' not in args:
        # i18n: "sort" is a keyword
        raise error.ParseError(_('sort requires one or two arguments'))
    keys = "rev"
    if 'keys' in args:
        # i18n: "sort" is a keyword
        keys = getstring(args['keys'], _("sort spec must be a string"))

    keyflags = []
    for k in keys.split():
        fk = k
        reverse = (k[0] == '-')
        if reverse:
            k = k[1:]
        if k not in _sortkeyfuncs and k != 'topo':
            raise error.ParseError(_("unknown sort key %r") % fk)
        keyflags.append((k, reverse))

    if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
        # i18n: "topo" is a keyword
        raise error.ParseError(_('topo sort order cannot be combined '
                                 'with other sort keys'))

    opts = {}
    if 'topo.firstbranch' in args:
        if any(k == 'topo' for k, reverse in keyflags):
            opts['topo.firstbranch'] = args['topo.firstbranch']
        else:
            # i18n: "topo" and "topo.firstbranch" are keywords
            raise error.ParseError(_('topo.firstbranch can only be used '
                                     'when using the topo sort key'))

    return args['set'], keyflags, opts

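# Illustrative sketch (added for this write-up, not part of revset.py): the
# (key, reverse) pairs produced by the key-parsing loop in _getsortargs()
# for a sort spec such as "sort(all(), '-date user')". Only the
# string-splitting part is reproduced here; argument-tree handling is
# omitted.
#
#   >>> keys = '-date user'
#   >>> [(k.lstrip('-'), k.startswith('-')) for k in keys.split()]
#   [('date', True), ('user', False)]
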
@predicate('sort(set[, [-]key... [, ...]])', safe=True)
def sort(repo, subset, x):
    """Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date,
    - ``topo`` for a reverse topological sort

    The ``topo`` sort order cannot be combined with other sort keys. This sort
    takes one optional argument, ``topo.firstbranch``, which takes a revset that
    specifies what topological branches to prioritize in the sort.

    """
    s, keyflags, opts = _getsortargs(x)
    revs = getset(repo, subset, s)

    if not keyflags:
        return revs
    if len(keyflags) == 1 and keyflags[0][0] == "rev":
        revs.sort(reverse=keyflags[0][1])
        return revs
    elif keyflags[0][0] == "topo":
        firstbranch = ()
        if 'topo.firstbranch' in opts:
            firstbranch = getset(repo, subset, opts['topo.firstbranch'])
        revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
                       istopo=True)
        if keyflags[0][1]:
            revs.reverse()
        return revs

    # sort() is guaranteed to be stable
    ctxs = [repo[r] for r in revs]
    for k, reverse in reversed(keyflags):
        ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
    return baseset([c.rev() for c in ctxs])

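# Illustrative sketch (added for this write-up, not part of revset.py): why
# sort() above walks keyflags in reverse with Python's stable sort. Sorting
# by the least significant key first and the most significant key last gives
# the usual "first key wins, later keys break ties" behaviour. The tuples
# below stand in for (user, rev) pairs under a 'sort(..., "user -rev")' spec.
#
#   >>> rows = [('bob', 2), ('alice', 3), ('alice', 1)]
#   >>> for col, desc in reversed([(0, False), (1, True)]):
#   ...     rows.sort(key=lambda t: t[col], reverse=desc)
#   >>> rows
#   [('alice', 3), ('alice', 1), ('bob', 2)]
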
def _toposort(revs, parentsfunc, firstbranch=()):
    """Yield revisions from heads to roots one (topo) branch at a time.

    This function aims to be used by a graph generator that wishes to minimize
    the number of parallel branches and their interleaving.

    Example iteration order (numbers show the "true" order in a changelog):

      o 4
      |
      o 1
      |
      | o 3
      | |
      | o 2
      |/
      o 0

    Note that the ancestors of merges are understood by the current
    algorithm to be on the same branch. This means no reordering will
    occur behind a merge.
    """

    ### Quick summary of the algorithm
    #
    # This function is based around a "retention" principle. We keep revisions
    # in memory until we are ready to emit a whole branch that immediately
    # "merges" into an existing one. This reduces the number of parallel
    # branches with interleaved revisions.
    #
    # During iteration revs are split into two groups:
    # A) revisions already emitted
    # B) revisions in "retention". They are stored as different subgroups.
    #
    # for each REV, we do the following logic:
    #
    #   1) if REV is a parent of (A), we will emit it. If there is a
    #   retention group ((B) above) that is blocked on REV being
    #   available, we emit all the revisions out of that retention
    #   group first.
    #
    #   2) else, we'll search for a subgroup in (B) waiting for REV to be
    #   available, if such a subgroup exists, we add REV to it and the subgroup
    #   is now waiting for REV.parents() to be available.
    #
    #   3) finally if no such group existed in (B), we create a new subgroup.
    #
    #
    # To bootstrap the algorithm, we emit the tipmost revision (which
    # puts it in group (A) from above).

    revs.sort(reverse=True)

    # Set of parents of revisions that have been emitted. They can be considered
    # unblocked as the graph generator is already aware of them so there is no
    # need to delay the revisions that reference them.
    #
    # If someone wants to prioritize a branch over the others, pre-filling this
    # set will force all other branches to wait until this branch is ready to be
    # emitted.
    unblocked = set(firstbranch)

    # list of groups waiting to be displayed, each group is defined by:
    #
    #   (revs:    lists of revs waiting to be displayed,
    #    blocked: set of revs that cannot be displayed before those in 'revs')
    #
    # The second value ('blocked') corresponds to parents of any revision in the
    # group ('revs') that is not itself contained in the group. The main idea
    # of this algorithm is to delay as much as possible the emission of any
    # revision. This means waiting for the moment we are about to display
    # these parents to display the revs in a group.
    #
    # This first implementation is smart until it encounters a merge: it will
    # emit revs as soon as any parent is about to be emitted and can grow an
    # arbitrary number of revs in 'blocked'. In practice this means we properly
    # retain new branches but give up on any special ordering for ancestors
    # of merges. The implementation can be improved to handle this better.
    #
    # The first subgroup is special. It corresponds to all the revisions that
    # were already emitted. The 'revs' list is expected to be empty and the
    # 'blocked' set contains the parent revisions of already emitted revisions.
    #
    # You could pre-seed the <parents> set of groups[0] to a specific
    # changeset to select what the first emitted branch should be.
    groups = [([], unblocked)]
    pendingheap = []
    pendingset = set()

    heapq.heapify(pendingheap)
    heappop = heapq.heappop
    heappush = heapq.heappush
    for currentrev in revs:
        # Heap works with smallest element, we want highest so we invert
        if currentrev not in pendingset:
            heappush(pendingheap, -currentrev)
            pendingset.add(currentrev)
        # iterate on pending revs until the current rev itself has been
        # processed.
        rev = None
        while rev != currentrev:
            rev = -heappop(pendingheap)
            pendingset.remove(rev)

            # Seek for a subgroup blocked, waiting for the current revision.
            matching = [i for i, g in enumerate(groups) if rev in g[1]]

            if matching:
                # The main idea is to gather together all sets that are blocked
                # on the same revision.
                #
                # Groups are merged when a common blocking ancestor is
                # observed. For example, given two groups:
                #
                #   revs [5, 4] waiting for 1
                #   revs [3, 2] waiting for 1
                #
                # These two groups will be merged when we process
                # 1. In theory, we could have merged the groups when
                # we added 2 to the group it is now in (we could have
                # noticed the groups were both blocked on 1 then), but
                # the way it works now makes the algorithm simpler.
                #
                # We also always keep the oldest subgroup first. We can
                # probably improve the behavior by having the longest set
                # first. That way, graph algorithms could minimise the length
                # of parallel lines in their drawing. This is currently not done.
                targetidx = matching.pop(0)
                trevs, tparents = groups[targetidx]
                for i in matching:
                    gr = groups[i]
                    trevs.extend(gr[0])
                    tparents |= gr[1]
                # delete all merged subgroups (except the one we kept)
                # (starting from the last subgroup for performance and
                # sanity reasons)
                for i in reversed(matching):
                    del groups[i]
            else:
                # This is a new head. We create a new subgroup for it.
                targetidx = len(groups)
                groups.append(([], set([rev])))

            gr = groups[targetidx]

            # We now add the current node to this subgroup. This is done
            # after the subgroup merging because all elements from a subgroup
            # that relied on this rev must precede it.
            #
            # we also update the <parents> set to include the parents of the
            # new nodes.
            if rev == currentrev: # only display stuff in rev
                gr[0].append(rev)
            gr[1].remove(rev)
            parents = [p for p in parentsfunc(rev) if p > node.nullrev]
            gr[1].update(parents)
            for p in parents:
                if p not in pendingset:
                    pendingset.add(p)
                    heappush(pendingheap, -p)

            # Look for a subgroup to display
            #
            # When unblocked is empty (if clause), we were not waiting for any
            # revisions during the first iteration (if no priority was given) or
            # if we emitted a whole disconnected set of the graph (reached a
            # root). In that case we arbitrarily take the oldest known
            # subgroup. The heuristic could probably be better.
            #
            # Otherwise (elif clause) if the subgroup is blocked on
            # a revision we just emitted, we can safely emit it as
            # well.
            if not unblocked:
                if len(groups) > 1: # display other subset
                    targetidx = 1
                    gr = groups[1]
            elif not gr[1] & unblocked:
                gr = None

            if gr is not None:
                # update the set of awaited revisions with the one from the
                # subgroup
                unblocked |= gr[1]
                # output all revisions in the subgroup
                for r in gr[0]:
                    yield r
                # delete the subgroup that you just output
                # unless it is groups[0] in which case you just empty it.
                if targetidx:
                    del groups[targetidx]
                else:
                    gr[0][:] = []
    # Check if we have some subgroups waiting for revisions we are not going to
    # iterate over
    for g in groups:
        for r in g[0]:
            yield r

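# Illustrative sketch (added for this write-up, not part of revset.py):
# _toposort() applied to the small graph drawn in its docstring. 'parents'
# is a hypothetical stand-in for changelog.parentrevs(), with -1 playing the
# role of node.nullrev; one branch (4, 1) is emitted before the other
# (3, 2) and the shared root 0 comes last.
#
#   >>> parents = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (2, -1), 4: (1, -1)}
#   >>> list(_toposort([0, 1, 2, 3, 4], lambda r: parents[r]))
#   [4, 1, 3, 2, 0]
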
@predicate('subrepo([pattern])')
def subrepo(repo, subset, x):
    """Changesets that add, modify or remove the given subrepo. If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    pat = None
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        k, p, m = util.stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        s = repo.status(c.p1().node(), c.node(), match=m)

        if pat is None:
            return s.added or s.modified or s.removed

        if s.added:
            return any(submatches(c.substate.keys()))

        if s.modified:
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches, condrepr=('<subrepo %r>', pat))

def _substringmatcher(pattern):
    kind, pattern, matcher = util.stringmatcher(pattern)
    if kind == 'literal':
        matcher = lambda s: pattern in s
    return kind, pattern, matcher

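# Illustrative sketch (added for this write-up, not part of revset.py): what
# _substringmatcher() adds on top of util.stringmatcher(). For a plain
# string the matcher becomes a substring test instead of an exact match;
# 're:' patterns keep their regular-expression semantics. The inputs below
# are made up.
#
#   >>> kind, pattern, matcher = _substringmatcher('alice')
#   >>> kind, matcher('alice <alice@example.com>'), matcher('bob')
#   ('literal', True, False)
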
@predicate('tag([name])', safe=True)
def tag(repo, subset, x):
    """The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if args:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = util.stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise error.RepoLookupError(_("tag '%s' does not exist")
                                            % pattern)
            s = set([repo[tn].rev()])
        else:
            s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
    else:
        s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
    return subset & s

@predicate('tagged', safe=True)
def tagged(repo, subset, x):
    return tag(repo, subset, x)

@predicate('unstable()', safe=True)
def unstable(repo, subset, x):
    """Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    unstables = obsmod.getrevs(repo, 'unstable')
    return subset & unstables


@predicate('user(string)', safe=True)
def user(repo, subset, x):
    """User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    return author(repo, subset, x)

# experimental
@predicate('wdir', safe=True)
def wdir(repo, subset, x):
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    if node.wdirrev in subset or isinstance(subset, fullreposet):
        return baseset([node.wdirrev])
    return baseset()

# for internal use
@predicate('_list', safe=True)
def _list(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    ls = []
    seen = set()
    for t in s.split('\0'):
        try:
            # fast path for integer revision
            r = int(t)
            if str(r) != t or r not in cl:
                raise ValueError
            revs = [r]
        except ValueError:
            revs = stringset(repo, subset, t)

        for r in revs:
            if r in seen:
                continue
            if (r in subset
                or r == node.nullrev and isinstance(subset, fullreposet)):
                ls.append(r)
            seen.add(r)
    return baseset(ls)

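# Illustrative sketch (added for this write-up, not part of revset.py): the
# wire format the internal _list()/_intlist()/_hexlist() predicates expect,
# a single '\0'-separated string of symbols or numbers (as built by helpers
# such as formatspec()). The values below are made up.
#
#   >>> '\0'.join(['10', '25', 'feature-branch'])
#   '10\x0025\x00feature-branch'
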
2278 # for internal use
2281 # for internal use
2279 @predicate('_intlist', safe=True)
2282 @predicate('_intlist', safe=True)
2280 def _intlist(repo, subset, x):
2283 def _intlist(repo, subset, x):
2281 s = getstring(x, "internal error")
2284 s = getstring(x, "internal error")
2282 if not s:
2285 if not s:
2283 return baseset()
2286 return baseset()
2284 ls = [int(r) for r in s.split('\0')]
2287 ls = [int(r) for r in s.split('\0')]
2285 s = subset
2288 s = subset
2286 return baseset([r for r in ls if r in s])
2289 return baseset([r for r in ls if r in s])
2287
2290
2288 # for internal use
2291 # for internal use
2289 @predicate('_hexlist', safe=True)
2292 @predicate('_hexlist', safe=True)
2290 def _hexlist(repo, subset, x):
2293 def _hexlist(repo, subset, x):
2291 s = getstring(x, "internal error")
2294 s = getstring(x, "internal error")
2292 if not s:
2295 if not s:
2293 return baseset()
2296 return baseset()
2294 cl = repo.changelog
2297 cl = repo.changelog
2295 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2298 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2296 s = subset
2299 s = subset
2297 return baseset([r for r in ls if r in s])
2300 return baseset([r for r in ls if r in s])
2298
2301
2299 methods = {
2302 methods = {
2300 "range": rangeset,
2303 "range": rangeset,
2301 "dagrange": dagrange,
2304 "dagrange": dagrange,
2302 "string": stringset,
2305 "string": stringset,
2303 "symbol": stringset,
2306 "symbol": stringset,
2304 "and": andset,
2307 "and": andset,
2305 "or": orset,
2308 "or": orset,
2306 "not": notset,
2309 "not": notset,
2307 "difference": differenceset,
2310 "difference": differenceset,
2308 "list": listset,
2311 "list": listset,
2309 "keyvalue": keyvaluepair,
2312 "keyvalue": keyvaluepair,
2310 "func": func,
2313 "func": func,
2311 "ancestor": ancestorspec,
2314 "ancestor": ancestorspec,
2312 "parent": parentspec,
2315 "parent": parentspec,
2313 "parentpost": parentpost,
2316 "parentpost": parentpost,
2314 }
2317 }
2315
2318
2316 # Constants for ordering requirement, used in _analyze():
2319 # Constants for ordering requirement, used in _analyze():
2317 #
2320 #
2318 # If 'define', any nested functions and operations can change the ordering of
2321 # If 'define', any nested functions and operations can change the ordering of
2319 # the entries in the set. If 'follow', any nested functions and operations
2322 # the entries in the set. If 'follow', any nested functions and operations
2320 # should take the ordering specified by the first operand to the '&' operator.
2323 # should take the ordering specified by the first operand to the '&' operator.
2321 #
2324 #
2322 # For instance,
2325 # For instance,
2323 #
2326 #
2324 # X & (Y | Z)
2327 # X & (Y | Z)
2325 # ^ ^^^^^^^
2328 # ^ ^^^^^^^
2326 # | follow
2329 # | follow
2327 # define
2330 # define
2328 #
2331 #
2329 # will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
2332 # will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
2330 # of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
2333 # of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
2331 #
2334 #
2332 # 'any' means the order doesn't matter. For instance,
2335 # 'any' means the order doesn't matter. For instance,
2333 #
2336 #
2334 # X & !Y
2337 # X & !Y
2335 # ^
2338 # ^
2336 # any
2339 # any
2337 #
2340 #
2338 # 'y()' can either enforce its ordering requirement or take the ordering
2341 # 'y()' can either enforce its ordering requirement or take the ordering
2339 # specified by 'x()' because 'not()' doesn't care the order.
2342 # specified by 'x()' because 'not()' doesn't care the order.
2340 #
2343 #
2341 # Transition of ordering requirement:
2344 # Transition of ordering requirement:
2342 #
2345 #
2343 # 1. starts with 'define'
2346 # 1. starts with 'define'
2344 # 2. shifts to 'follow' by 'x & y'
2347 # 2. shifts to 'follow' by 'x & y'
2345 # 3. changes back to 'define' on function call 'f(x)' or function-like
2348 # 3. changes back to 'define' on function call 'f(x)' or function-like
2346 # operation 'x (f) y' because 'f' may have its own ordering requirement
2349 # operation 'x (f) y' because 'f' may have its own ordering requirement
2347 # for 'x' and 'y' (e.g. 'first(x)')
2350 # for 'x' and 'y' (e.g. 'first(x)')
2348 #
2351 #
2349 anyorder = 'any' # don't care the order
2352 anyorder = 'any' # don't care the order
2350 defineorder = 'define' # should define the order
2353 defineorder = 'define' # should define the order
2351 followorder = 'follow' # must follow the current order
2354 followorder = 'follow' # must follow the current order
2352
2355
2353 # transition table for 'x & y', from the current expression 'x' to 'y'
2356 # transition table for 'x & y', from the current expression 'x' to 'y'
2354 _tofolloworder = {
2357 _tofolloworder = {
2355 anyorder: anyorder,
2358 anyorder: anyorder,
2356 defineorder: followorder,
2359 defineorder: followorder,
2357 followorder: followorder,
2360 followorder: followorder,
2358 }
2361 }
2359
2362
def _matchonly(revs, bases):
    """
    >>> f = lambda *args: _matchonly(*map(parse, args))
    >>> f('ancestors(A)', 'not ancestors(B)')
    ('list', ('symbol', 'A'), ('symbol', 'B'))
    """
    if (revs is not None
        and revs[0] == 'func'
        and getsymbol(revs[1]) == 'ancestors'
        and bases is not None
        and bases[0] == 'not'
        and bases[1][0] == 'func'
        and getsymbol(bases[1][1]) == 'ancestors'):
        return ('list', revs[2], bases[1][2])

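# Illustrative sketch of how the optimizer (further below) consumes the
# result of _matchonly(): when an 'and' node matches '::A and not ::B', the
# argument pair is wrapped into a call to the fast only() revset. The helper
# name is invented for the example, and the order it hardcodes is a
# simplification of what _optimize() actually threads through.
def _exampleonlyrewrite():
    revs = parse('ancestors(A)')
    bases = parse('not ancestors(B)')
    tm = _matchonly(revs, bases)
    # tm == ('list', ('symbol', 'A'), ('symbol', 'B'))
    return ('func', ('symbol', 'only'), tm, defineorder)
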
def _fixops(x):
    """Rewrite raw parsed tree to resolve ambiguous syntax which cannot be
    handled well by our simple top-down parser"""
    if not isinstance(x, tuple):
        return x

    op = x[0]
    if op == 'parent':
        # x^:y means (x^) : y, not x ^ (:y)
        # x^: means (x^) :, not x ^ (:)
        post = ('parentpost', x[1])
        if x[2][0] == 'dagrangepre':
            return _fixops(('dagrange', post, x[2][1]))
        elif x[2][0] == 'rangepre':
            return _fixops(('range', post, x[2][1]))
        elif x[2][0] == 'rangeall':
            return _fixops(('rangepost', post))
    elif op == 'or':
        # make number of arguments deterministic:
        # x + y + z -> (or x y z) -> (or (list x y z))
        return (op, _fixops(('list',) + x[1:]))

    return (op,) + tuple(_fixops(y) for y in x[1:])

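# Illustrative sketch of the ambiguity _fixops() resolves: 'A^:B' is parsed
# bottom-up as A ^ (:B) and regrouped here into (A^) : B. The 'raw' tree
# below is hand-written for the example rather than produced by the parser.
raw = ('parent', ('symbol', 'A'), ('rangepre', ('symbol', 'B')))
assert _fixops(raw) == ('range', ('parentpost', ('symbol', 'A')),
                        ('symbol', 'B'))
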
def _analyze(x, order):
    if x is None:
        return x

    op = x[0]
    if op == 'minus':
        return _analyze(('and', x[1], ('not', x[2])), order)
    elif op == 'only':
        t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
        return _analyze(t, order)
    elif op == 'onlypost':
        return _analyze(('func', ('symbol', 'only'), x[1]), order)
    elif op == 'dagrangepre':
        return _analyze(('func', ('symbol', 'ancestors'), x[1]), order)
    elif op == 'dagrangepost':
        return _analyze(('func', ('symbol', 'descendants'), x[1]), order)
    elif op == 'rangeall':
        return _analyze(('range', ('string', '0'), ('string', 'tip')), order)
    elif op == 'rangepre':
        return _analyze(('range', ('string', '0'), x[1]), order)
    elif op == 'rangepost':
        return _analyze(('range', x[1], ('string', 'tip')), order)
    elif op == 'negate':
        s = getstring(x[1], _("can't negate that"))
        return _analyze(('string', '-' + s), order)
    elif op in ('string', 'symbol'):
        return x
    elif op == 'and':
        ta = _analyze(x[1], order)
        tb = _analyze(x[2], _tofolloworder[order])
        return (op, ta, tb, order)
    elif op == 'or':
        return (op, _analyze(x[1], order), order)
    elif op == 'not':
        return (op, _analyze(x[1], anyorder), order)
    elif op == 'parentpost':
        return (op, _analyze(x[1], defineorder), order)
    elif op == 'group':
        return _analyze(x[1], order)
    elif op in ('dagrange', 'range', 'parent', 'ancestor'):
        ta = _analyze(x[1], defineorder)
        tb = _analyze(x[2], defineorder)
        return (op, ta, tb, order)
    elif op == 'list':
        return (op,) + tuple(_analyze(y, order) for y in x[1:])
    elif op == 'keyvalue':
        return (op, x[1], _analyze(x[2], order))
    elif op == 'func':
        return (op, x[1], _analyze(x[2], defineorder), order)
    raise ValueError('invalid operator %r' % op)

def analyze(x, order=defineorder):
    """Transform raw parsed tree to evaluatable tree which can be fed to
    optimize() or getset()

    All pseudo operations should be mapped to real operations or functions
    defined in methods or symbols table respectively.

    'order' specifies how the current expression 'x' is ordered (see the
    constants defined above.)
    """
    return _analyze(x, order)

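# Illustrative sketch (the helper name is invented): analyze() annotates each
# operator node with the ordering requirement it was analyzed under, which
# _optimize() and getset() consult later.
def _exampleanalyze():
    tree = analyze(parse('a & b'))
    # expected: ('and', ('symbol', 'a'), ('symbol', 'b'), 'define')
    return tree
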
2462 def _optimize(x, small):
2465 def _optimize(x, small):
2463 if x is None:
2466 if x is None:
2464 return 0, x
2467 return 0, x
2465
2468
2466 smallbonus = 1
2469 smallbonus = 1
2467 if small:
2470 if small:
2468 smallbonus = .5
2471 smallbonus = .5
2469
2472
2470 op = x[0]
2473 op = x[0]
2471 if op in ('string', 'symbol'):
2474 if op in ('string', 'symbol'):
2472 return smallbonus, x # single revisions are small
2475 return smallbonus, x # single revisions are small
2473 elif op == 'and':
2476 elif op == 'and':
2474 wa, ta = _optimize(x[1], True)
2477 wa, ta = _optimize(x[1], True)
2475 wb, tb = _optimize(x[2], True)
2478 wb, tb = _optimize(x[2], True)
2476 order = x[3]
2479 order = x[3]
2477 w = min(wa, wb)
2480 w = min(wa, wb)
2478
2481
2479 # (::x and not ::y)/(not ::y and ::x) have a fast path
2482 # (::x and not ::y)/(not ::y and ::x) have a fast path
2480 tm = _matchonly(ta, tb) or _matchonly(tb, ta)
2483 tm = _matchonly(ta, tb) or _matchonly(tb, ta)
2481 if tm:
2484 if tm:
2482 return w, ('func', ('symbol', 'only'), tm, order)
2485 return w, ('func', ('symbol', 'only'), tm, order)
2483
2486
2484 if tb is not None and tb[0] == 'not':
2487 if tb is not None and tb[0] == 'not':
2485 return wa, ('difference', ta, tb[1], order)
2488 return wa, ('difference', ta, tb[1], order)
2486
2489
2487 if wa > wb:
2490 if wa > wb:
2488 return w, (op, tb, ta, order)
2491 return w, (op, tb, ta, order)
2489 return w, (op, ta, tb, order)
2492 return w, (op, ta, tb, order)
2490 elif op == 'or':
2493 elif op == 'or':
2491 # fast path for machine-generated expression, that is likely to have
2494 # fast path for machine-generated expression, that is likely to have
2492 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
2495 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
2493 order = x[2]
2496 order = x[2]
2494 ws, ts, ss = [], [], []
2497 ws, ts, ss = [], [], []
2495 def flushss():
2498 def flushss():
2496 if not ss:
2499 if not ss:
2497 return
2500 return
2498 if len(ss) == 1:
2501 if len(ss) == 1:
2499 w, t = ss[0]
2502 w, t = ss[0]
2500 else:
2503 else:
2501 s = '\0'.join(t[1] for w, t in ss)
2504 s = '\0'.join(t[1] for w, t in ss)
2502 y = ('func', ('symbol', '_list'), ('string', s), order)
2505 y = ('func', ('symbol', '_list'), ('string', s), order)
2503 w, t = _optimize(y, False)
2506 w, t = _optimize(y, False)
2504 ws.append(w)
2507 ws.append(w)
2505 ts.append(t)
2508 ts.append(t)
2506 del ss[:]
2509 del ss[:]
2507 for y in getlist(x[1]):
2510 for y in getlist(x[1]):
2508 w, t = _optimize(y, False)
2511 w, t = _optimize(y, False)
2509 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
2512 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
2510 ss.append((w, t))
2513 ss.append((w, t))
2511 continue
2514 continue
2512 flushss()
2515 flushss()
2513 ws.append(w)
2516 ws.append(w)
2514 ts.append(t)
2517 ts.append(t)
2515 flushss()
2518 flushss()
2516 if len(ts) == 1:
2519 if len(ts) == 1:
2517 return ws[0], ts[0] # 'or' operation is fully optimized out
2520 return ws[0], ts[0] # 'or' operation is fully optimized out
2518 # we can't reorder trees by weight because it would change the order.
2521 # we can't reorder trees by weight because it would change the order.
2519 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2522 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2520 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2523 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2521 return max(ws), (op, ('list',) + tuple(ts), order)
2524 return max(ws), (op, ('list',) + tuple(ts), order)
2522 elif op == 'not':
2525 elif op == 'not':
2523 # Optimize not public() to _notpublic() because we have a fast version
2526 # Optimize not public() to _notpublic() because we have a fast version
2524 if x[1][:3] == ('func', ('symbol', 'public'), None):
2527 if x[1][:3] == ('func', ('symbol', 'public'), None):
2525 order = x[1][3]
2528 order = x[1][3]
2526 newsym = ('func', ('symbol', '_notpublic'), None, order)
2529 newsym = ('func', ('symbol', '_notpublic'), None, order)
2527 o = _optimize(newsym, not small)
2530 o = _optimize(newsym, not small)
2528 return o[0], o[1]
2531 return o[0], o[1]
2529 else:
2532 else:
2530 o = _optimize(x[1], not small)
2533 o = _optimize(x[1], not small)
2531 order = x[2]
2534 order = x[2]
2532 return o[0], (op, o[1], order)
2535 return o[0], (op, o[1], order)
2533 elif op == 'parentpost':
2536 elif op == 'parentpost':
2534 o = _optimize(x[1], small)
2537 o = _optimize(x[1], small)
2535 order = x[2]
2538 order = x[2]
2536 return o[0], (op, o[1], order)
2539 return o[0], (op, o[1], order)
2537 elif op in ('dagrange', 'range', 'parent', 'ancestor'):
2540 elif op in ('dagrange', 'range', 'parent', 'ancestor'):
2538 wa, ta = _optimize(x[1], small)
2541 wa, ta = _optimize(x[1], small)
2539 wb, tb = _optimize(x[2], small)
2542 wb, tb = _optimize(x[2], small)
2540 order = x[3]
2543 order = x[3]
2541 return wa + wb, (op, ta, tb, order)
2544 return wa + wb, (op, ta, tb, order)
2542 elif op == 'list':
2545 elif op == 'list':
2543 ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
2546 ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
2544 return sum(ws), (op,) + ts
2547 return sum(ws), (op,) + ts
2545 elif op == 'keyvalue':
2548 elif op == 'keyvalue':
2546 w, t = _optimize(x[2], small)
2549 w, t = _optimize(x[2], small)
2547 return w, (op, x[1], t)
2550 return w, (op, x[1], t)
2548 elif op == 'func':
2551 elif op == 'func':
2549 f = getsymbol(x[1])
2552 f = getsymbol(x[1])
2550 wa, ta = _optimize(x[2], small)
2553 wa, ta = _optimize(x[2], small)
2551 if f in ('author', 'branch', 'closed', 'date', 'desc', 'file', 'grep',
2554 if f in ('author', 'branch', 'closed', 'date', 'desc', 'file', 'grep',
2552 'keyword', 'outgoing', 'user'):
2555 'keyword', 'outgoing', 'user'):
2553 w = 10 # slow
2556 w = 10 # slow
2554 elif f in ('modifies', 'adds', 'removes'):
2557 elif f in ('modifies', 'adds', 'removes'):
2555 w = 30 # slower
2558 w = 30 # slower
2556 elif f == "contains":
2559 elif f == "contains":
2557 w = 100 # very slow
2560 w = 100 # very slow
2558 elif f == "ancestor":
2561 elif f == "ancestor":
2559 w = 1 * smallbonus
2562 w = 1 * smallbonus
2560 elif f in ('reverse', 'limit', 'first', '_intlist'):
2563 elif f in ('reverse', 'limit', 'first', '_intlist'):
2561 w = 0
2564 w = 0
2562 elif f == "sort":
2565 elif f == "sort":
2563 w = 10 # assume most sorts look at changelog
2566 w = 10 # assume most sorts look at changelog
2564 else:
2567 else:
2565 w = 1
2568 w = 1
2566 order = x[3]
2569 order = x[3]
2567 return w + wa, (op, x[1], ta, order)
2570 return w + wa, (op, x[1], ta, order)
2568 raise ValueError('invalid operator %r' % op)
2571 raise ValueError('invalid operator %r' % op)
2569
2572
def optimize(tree):
    """Optimize evaluatable tree

    All pseudo operations should be transformed beforehand.
    """
    _weight, newtree = _optimize(tree, small=True)
    return newtree

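# Illustrative sketch (helper name invented; expected shapes derived from the
# branches of _optimize() above rather than from a test run): 'x and not y'
# becomes a 'difference' node, and 'not public()' is rewritten into a call to
# the faster _notpublic().
def _exampleoptimize():
    t1 = optimize(analyze(parse('a and not b')))
    # expected: ('difference', ('symbol', 'a'), ('symbol', 'b'), 'define')
    t2 = optimize(analyze(parse('not public()')))
    # expected: ('func', ('symbol', '_notpublic'), None, 'any')
    return t1, t2
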
2578 # the set of valid characters for the initial letter of symbols in
2581 # the set of valid characters for the initial letter of symbols in
2579 # alias declarations and definitions
2582 # alias declarations and definitions
2580 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2583 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2581 if c.isalnum() or c in '._@$' or ord(c) > 127)
2584 if c.isalnum() or c in '._@$' or ord(c) > 127)
2582
2585
2583 def _parsewith(spec, lookup=None, syminitletters=None):
2586 def _parsewith(spec, lookup=None, syminitletters=None):
2584 """Generate a parse tree of given spec with given tokenizing options
2587 """Generate a parse tree of given spec with given tokenizing options
2585
2588
2586 >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
2589 >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
2587 ('func', ('symbol', 'foo'), ('symbol', '$1'))
2590 ('func', ('symbol', 'foo'), ('symbol', '$1'))
2588 >>> _parsewith('$1')
2591 >>> _parsewith('$1')
2589 Traceback (most recent call last):
2592 Traceback (most recent call last):
2590 ...
2593 ...
2591 ParseError: ("syntax error in revset '$1'", 0)
2594 ParseError: ("syntax error in revset '$1'", 0)
2592 >>> _parsewith('foo bar')
2595 >>> _parsewith('foo bar')
2593 Traceback (most recent call last):
2596 Traceback (most recent call last):
2594 ...
2597 ...
2595 ParseError: ('invalid token', 4)
2598 ParseError: ('invalid token', 4)
2596 """
2599 """
2597 p = parser.parser(elements)
2600 p = parser.parser(elements)
2598 tree, pos = p.parse(tokenize(spec, lookup=lookup,
2601 tree, pos = p.parse(tokenize(spec, lookup=lookup,
2599 syminitletters=syminitletters))
2602 syminitletters=syminitletters))
2600 if pos != len(spec):
2603 if pos != len(spec):
2601 raise error.ParseError(_('invalid token'), pos)
2604 raise error.ParseError(_('invalid token'), pos)
2602 return _fixops(parser.simplifyinfixops(tree, ('list', 'or')))
2605 return _fixops(parser.simplifyinfixops(tree, ('list', 'or')))
2603
2606
2604 class _aliasrules(parser.basealiasrules):
2607 class _aliasrules(parser.basealiasrules):
2605 """Parsing and expansion rule set of revset aliases"""
2608 """Parsing and expansion rule set of revset aliases"""
2606 _section = _('revset alias')
2609 _section = _('revset alias')
2607
2610
2608 @staticmethod
2611 @staticmethod
2609 def _parse(spec):
2612 def _parse(spec):
2610 """Parse alias declaration/definition ``spec``
2613 """Parse alias declaration/definition ``spec``
2611
2614
2612 This allows symbol names to use also ``$`` as an initial letter
2615 This allows symbol names to use also ``$`` as an initial letter
2613 (for backward compatibility), and callers of this function should
2616 (for backward compatibility), and callers of this function should
2614 examine whether ``$`` is used also for unexpected symbols or not.
2617 examine whether ``$`` is used also for unexpected symbols or not.
2615 """
2618 """
2616 return _parsewith(spec, syminitletters=_aliassyminitletters)
2619 return _parsewith(spec, syminitletters=_aliassyminitletters)
2617
2620
2618 @staticmethod
2621 @staticmethod
2619 def _trygetfunc(tree):
2622 def _trygetfunc(tree):
2620 if tree[0] == 'func' and tree[1][0] == 'symbol':
2623 if tree[0] == 'func' and tree[1][0] == 'symbol':
2621 return tree[1][1], getlist(tree[2])
2624 return tree[1][1], getlist(tree[2])
2622
2625
2623 def expandaliases(ui, tree):
2626 def expandaliases(ui, tree):
2624 aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
2627 aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
2625 tree = _aliasrules.expand(aliases, tree)
2628 tree = _aliasrules.expand(aliases, tree)
2626 # warn about problematic (but not referred) aliases
2629 # warn about problematic (but not referred) aliases
2627 for name, alias in sorted(aliases.iteritems()):
2630 for name, alias in sorted(aliases.iteritems()):
2628 if alias.error and not alias.warned:
2631 if alias.error and not alias.warned:
2629 ui.warn(_('warning: %s\n') % (alias.error))
2632 ui.warn(_('warning: %s\n') % (alias.error))
2630 alias.warned = True
2633 alias.warned = True
2631 return tree
2634 return tree
2632
2635
2633 def foldconcat(tree):
2636 def foldconcat(tree):
2634 """Fold elements to be concatenated by `##`
2637 """Fold elements to be concatenated by `##`
2635 """
2638 """
2636 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2639 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2637 return tree
2640 return tree
2638 if tree[0] == '_concat':
2641 if tree[0] == '_concat':
2639 pending = [tree]
2642 pending = [tree]
2640 l = []
2643 l = []
2641 while pending:
2644 while pending:
2642 e = pending.pop()
2645 e = pending.pop()
2643 if e[0] == '_concat':
2646 if e[0] == '_concat':
2644 pending.extend(reversed(e[1:]))
2647 pending.extend(reversed(e[1:]))
2645 elif e[0] in ('string', 'symbol'):
2648 elif e[0] in ('string', 'symbol'):
2646 l.append(e[1])
2649 l.append(e[1])
2647 else:
2650 else:
2648 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2651 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2649 raise error.ParseError(msg)
2652 raise error.ParseError(msg)
2650 return ('string', ''.join(l))
2653 return ('string', ''.join(l))
2651 else:
2654 else:
2652 return tuple(foldconcat(t) for t in tree)
2655 return tuple(foldconcat(t) for t in tree)
2653
2656
2654 def parse(spec, lookup=None):
2657 def parse(spec, lookup=None):
2655 return _parsewith(spec, lookup=lookup)
2658 return _parsewith(spec, lookup=lookup)
2656
2659
2657 def posttreebuilthook(tree, repo):
2660 def posttreebuilthook(tree, repo):
2658 # hook for extensions to execute code on the optimized tree
2661 # hook for extensions to execute code on the optimized tree
2659 pass
2662 pass
2660
2663
def match(ui, spec, repo=None):
    """Create a matcher for a single revision spec."""
    return matchany(ui, [spec], repo=repo)

def matchany(ui, specs, repo=None):
    """Create a matcher that will include any revisions matching one of the
    given specs"""
    if not specs:
        def mfunc(repo, subset=None):
            return baseset()
        return mfunc
    if not all(specs):
        raise error.ParseError(_("empty query"))
    lookup = None
    if repo:
        lookup = repo.__contains__
    if len(specs) == 1:
        tree = parse(specs[0], lookup)
    else:
        tree = ('or', ('list',) + tuple(parse(s, lookup) for s in specs))

    if ui:
        tree = expandaliases(ui, tree)
    tree = foldconcat(tree)
    tree = analyze(tree)
    tree = optimize(tree)
    posttreebuilthook(tree, repo)
    return makematcher(tree)

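# Illustrative usage sketch: the ui and repo arguments are assumed to come
# from the caller (for instance a command or an extension holding a loaded
# repository); the helper name below is invented for the example.
def _examplematchusage(ui, repo):
    mfunc = match(ui, 'head() and not closed()', repo=repo)
    revs = mfunc(repo)         # a lazily evaluated smartset
    return list(revs)          # concrete revision numbers
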
def makematcher(tree):
    """Create a matcher from an evaluatable tree"""
    def mfunc(repo, subset=None):
        if subset is None:
            subset = fullreposet(repo)
        if util.safehasattr(subset, 'isascending'):
            result = getset(repo, subset, tree)
        else:
            result = getset(repo, baseset(subset), tree)
        return result
    return mfunc

2702 def formatspec(expr, *args):
2705 def formatspec(expr, *args):
2703 '''
2706 '''
2704 This is a convenience function for using revsets internally, and
2707 This is a convenience function for using revsets internally, and
2705 escapes arguments appropriately. Aliases are intentionally ignored
2708 escapes arguments appropriately. Aliases are intentionally ignored
2706 so that intended expression behavior isn't accidentally subverted.
2709 so that intended expression behavior isn't accidentally subverted.
2707
2710
2708 Supported arguments:
2711 Supported arguments:
2709
2712
2710 %r = revset expression, parenthesized
2713 %r = revset expression, parenthesized
2711 %d = int(arg), no quoting
2714 %d = int(arg), no quoting
2712 %s = string(arg), escaped and single-quoted
2715 %s = string(arg), escaped and single-quoted
2713 %b = arg.branch(), escaped and single-quoted
2716 %b = arg.branch(), escaped and single-quoted
2714 %n = hex(arg), single-quoted
2717 %n = hex(arg), single-quoted
2715 %% = a literal '%'
2718 %% = a literal '%'
2716
2719
2717 Prefixing the type with 'l' specifies a parenthesized list of that type.
2720 Prefixing the type with 'l' specifies a parenthesized list of that type.
2718
2721
2719 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2722 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2720 '(10 or 11):: and ((this()) or (that()))'
2723 '(10 or 11):: and ((this()) or (that()))'
2721 >>> formatspec('%d:: and not %d::', 10, 20)
2724 >>> formatspec('%d:: and not %d::', 10, 20)
2722 '10:: and not 20::'
2725 '10:: and not 20::'
2723 >>> formatspec('%ld or %ld', [], [1])
2726 >>> formatspec('%ld or %ld', [], [1])
2724 "_list('') or 1"
2727 "_list('') or 1"
2725 >>> formatspec('keyword(%s)', 'foo\\xe9')
2728 >>> formatspec('keyword(%s)', 'foo\\xe9')
2726 "keyword('foo\\\\xe9')"
2729 "keyword('foo\\\\xe9')"
2727 >>> b = lambda: 'default'
2730 >>> b = lambda: 'default'
2728 >>> b.branch = b
2731 >>> b.branch = b
2729 >>> formatspec('branch(%b)', b)
2732 >>> formatspec('branch(%b)', b)
2730 "branch('default')"
2733 "branch('default')"
2731 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2734 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2732 "root(_list('a\\x00b\\x00c\\x00d'))"
2735 "root(_list('a\\x00b\\x00c\\x00d'))"
2733 '''
2736 '''
2734
2737
2735 def quote(s):
2738 def quote(s):
2736 return repr(str(s))
2739 return repr(str(s))
2737
2740
2738 def argtype(c, arg):
2741 def argtype(c, arg):
2739 if c == 'd':
2742 if c == 'd':
2740 return str(int(arg))
2743 return str(int(arg))
2741 elif c == 's':
2744 elif c == 's':
2742 return quote(arg)
2745 return quote(arg)
2743 elif c == 'r':
2746 elif c == 'r':
2744 parse(arg) # make sure syntax errors are confined
2747 parse(arg) # make sure syntax errors are confined
2745 return '(%s)' % arg
2748 return '(%s)' % arg
2746 elif c == 'n':
2749 elif c == 'n':
2747 return quote(node.hex(arg))
2750 return quote(node.hex(arg))
2748 elif c == 'b':
2751 elif c == 'b':
2749 return quote(arg.branch())
2752 return quote(arg.branch())
2750
2753
2751 def listexp(s, t):
2754 def listexp(s, t):
2752 l = len(s)
2755 l = len(s)
2753 if l == 0:
2756 if l == 0:
2754 return "_list('')"
2757 return "_list('')"
2755 elif l == 1:
2758 elif l == 1:
2756 return argtype(t, s[0])
2759 return argtype(t, s[0])
2757 elif t == 'd':
2760 elif t == 'd':
2758 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2761 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2759 elif t == 's':
2762 elif t == 's':
2760 return "_list('%s')" % "\0".join(s)
2763 return "_list('%s')" % "\0".join(s)
2761 elif t == 'n':
2764 elif t == 'n':
2762 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2765 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2763 elif t == 'b':
2766 elif t == 'b':
2764 return "_list('%s')" % "\0".join(a.branch() for a in s)
2767 return "_list('%s')" % "\0".join(a.branch() for a in s)
2765
2768
2766 m = l // 2
2769 m = l // 2
2767 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2770 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2768
2771
2769 ret = ''
2772 ret = ''
2770 pos = 0
2773 pos = 0
2771 arg = 0
2774 arg = 0
2772 while pos < len(expr):
2775 while pos < len(expr):
2773 c = expr[pos]
2776 c = expr[pos]
2774 if c == '%':
2777 if c == '%':
2775 pos += 1
2778 pos += 1
2776 d = expr[pos]
2779 d = expr[pos]
2777 if d == '%':
2780 if d == '%':
2778 ret += d
2781 ret += d
2779 elif d in 'dsnbr':
2782 elif d in 'dsnbr':
2780 ret += argtype(d, args[arg])
2783 ret += argtype(d, args[arg])
2781 arg += 1
2784 arg += 1
2782 elif d == 'l':
2785 elif d == 'l':
2783 # a list of some type
2786 # a list of some type
2784 pos += 1
2787 pos += 1
2785 d = expr[pos]
2788 d = expr[pos]
2786 ret += listexp(list(args[arg]), d)
2789 ret += listexp(list(args[arg]), d)
2787 arg += 1
2790 arg += 1
2788 else:
2791 else:
2789 raise error.Abort(_('unexpected revspec format character %s')
2792 raise error.Abort(_('unexpected revspec format character %s')
2790 % d)
2793 % d)
2791 else:
2794 else:
2792 ret += c
2795 ret += c
2793 pos += 1
2796 pos += 1
2794
2797
2795 return ret
2798 return ret
2796
2799
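# Illustrative sketch (the values are made up): formatspec() builds a revset
# string from program values without hand-quoting them, so it is the safe way
# to embed user-provided text in a query.
spec = formatspec('%d:: and keyword(%s)', 10, 'bug 123')
# spec == "10:: and keyword('bug 123')"
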
2797 def prettyformat(tree):
2800 def prettyformat(tree):
2798 return parser.prettyformat(tree, ('string', 'symbol'))
2801 return parser.prettyformat(tree, ('string', 'symbol'))
2799
2802
2800 def depth(tree):
2803 def depth(tree):
2801 if isinstance(tree, tuple):
2804 if isinstance(tree, tuple):
2802 return max(map(depth, tree)) + 1
2805 return max(map(depth, tree)) + 1
2803 else:
2806 else:
2804 return 0
2807 return 0
2805
2808
2806 def funcsused(tree):
2809 def funcsused(tree):
2807 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2810 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2808 return set()
2811 return set()
2809 else:
2812 else:
2810 funcs = set()
2813 funcs = set()
2811 for s in tree[1:]:
2814 for s in tree[1:]:
2812 funcs |= funcsused(s)
2815 funcs |= funcsused(s)
2813 if tree[0] == 'func':
2816 if tree[0] == 'func':
2814 funcs.add(tree[1][1])
2817 funcs.add(tree[1][1])
2815 return funcs
2818 return funcs
2816
2819
def _formatsetrepr(r):
    """Format an optional printable representation of a set

    ======== =================================
    type(r)  example
    ======== =================================
    tuple    ('<not %r>', other)
    str      '<branch closed>'
    callable lambda: '<branch %r>' % sorted(b)
    object   other
    ======== =================================
    """
    if r is None:
        return ''
    elif isinstance(r, tuple):
        return r[0] % r[1:]
    elif isinstance(r, str):
        return r
    elif callable(r):
        return r()
    else:
        return repr(r)

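# Illustrative sketch of the four accepted forms (matching the table above);
# the values are made up and evaluated locally, no repository involved.
assert _formatsetrepr(None) == ''
assert _formatsetrepr(('<not %r>', [1, 2])) == '<not [1, 2]>'
assert _formatsetrepr('<branch closed>') == '<branch closed>'
assert _formatsetrepr(lambda: '<branch default>') == '<branch default>'
assert _formatsetrepr(42) == '42'
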
2840 class abstractsmartset(object):
2843 class abstractsmartset(object):
2841
2844
2842 def __nonzero__(self):
2845 def __nonzero__(self):
2843 """True if the smartset is not empty"""
2846 """True if the smartset is not empty"""
2844 raise NotImplementedError()
2847 raise NotImplementedError()
2845
2848
2846 def __contains__(self, rev):
2849 def __contains__(self, rev):
2847 """provide fast membership testing"""
2850 """provide fast membership testing"""
2848 raise NotImplementedError()
2851 raise NotImplementedError()
2849
2852
2850 def __iter__(self):
2853 def __iter__(self):
2851 """iterate the set in the order it is supposed to be iterated"""
2854 """iterate the set in the order it is supposed to be iterated"""
2852 raise NotImplementedError()
2855 raise NotImplementedError()
2853
2856
2854 # Attributes containing a function to perform a fast iteration in a given
2857 # Attributes containing a function to perform a fast iteration in a given
2855 # direction. A smartset can have none, one, or both defined.
2858 # direction. A smartset can have none, one, or both defined.
2856 #
2859 #
2857 # Default value is None instead of a function returning None to avoid
2860 # Default value is None instead of a function returning None to avoid
2858 # initializing an iterator just for testing if a fast method exists.
2861 # initializing an iterator just for testing if a fast method exists.
2859 fastasc = None
2862 fastasc = None
2860 fastdesc = None
2863 fastdesc = None
2861
2864
2862 def isascending(self):
2865 def isascending(self):
2863 """True if the set will iterate in ascending order"""
2866 """True if the set will iterate in ascending order"""
2864 raise NotImplementedError()
2867 raise NotImplementedError()
2865
2868
2866 def isdescending(self):
2869 def isdescending(self):
2867 """True if the set will iterate in descending order"""
2870 """True if the set will iterate in descending order"""
2868 raise NotImplementedError()
2871 raise NotImplementedError()
2869
2872
    def istopo(self):
        """True if the set will iterate in topological order"""
        raise NotImplementedError()
2873
2876
2874 @util.cachefunc
2877 @util.cachefunc
2875 def min(self):
2878 def min(self):
2876 """return the minimum element in the set"""
2879 """return the minimum element in the set"""
2877 if self.fastasc is not None:
2880 if self.fastasc is not None:
2878 for r in self.fastasc():
2881 for r in self.fastasc():
2879 return r
2882 return r
2880 raise ValueError('arg is an empty sequence')
2883 raise ValueError('arg is an empty sequence')
2881 return min(self)
2884 return min(self)
2882
2885
2883 @util.cachefunc
2886 @util.cachefunc
2884 def max(self):
2887 def max(self):
2885 """return the maximum element in the set"""
2888 """return the maximum element in the set"""
2886 if self.fastdesc is not None:
2889 if self.fastdesc is not None:
2887 for r in self.fastdesc():
2890 for r in self.fastdesc():
2888 return r
2891 return r
2889 raise ValueError('arg is an empty sequence')
2892 raise ValueError('arg is an empty sequence')
2890 return max(self)
2893 return max(self)
2891
2894
2892 def first(self):
2895 def first(self):
2893 """return the first element in the set (user iteration perspective)
2896 """return the first element in the set (user iteration perspective)
2894
2897
2895 Return None if the set is empty"""
2898 Return None if the set is empty"""
2896 raise NotImplementedError()
2899 raise NotImplementedError()
2897
2900
2898 def last(self):
2901 def last(self):
2899 """return the last element in the set (user iteration perspective)
2902 """return the last element in the set (user iteration perspective)
2900
2903
2901 Return None if the set is empty"""
2904 Return None if the set is empty"""
2902 raise NotImplementedError()
2905 raise NotImplementedError()
2903
2906
    def __len__(self):
        """return the length of the smartset

        This can be expensive on a smartset that could otherwise be lazy."""
        raise NotImplementedError()
2909
2912
2910 def reverse(self):
2913 def reverse(self):
2911 """reverse the expected iteration order"""
2914 """reverse the expected iteration order"""
2912 raise NotImplementedError()
2915 raise NotImplementedError()
2913
2916
2914 def sort(self, reverse=True):
2917 def sort(self, reverse=True):
2915 """get the set to iterate in an ascending or descending order"""
2918 """get the set to iterate in an ascending or descending order"""
2916 raise NotImplementedError()
2919 raise NotImplementedError()
2917
2920
2918 def __and__(self, other):
2921 def __and__(self, other):
2919 """Returns a new object with the intersection of the two collections.
2922 """Returns a new object with the intersection of the two collections.
2920
2923
2921 This is part of the mandatory API for smartset."""
2924 This is part of the mandatory API for smartset."""
2922 if isinstance(other, fullreposet):
2925 if isinstance(other, fullreposet):
2923 return self
2926 return self
2924 return self.filter(other.__contains__, condrepr=other, cache=False)
2927 return self.filter(other.__contains__, condrepr=other, cache=False)
2925
2928
2926 def __add__(self, other):
2929 def __add__(self, other):
2927 """Returns a new object with the union of the two collections.
2930 """Returns a new object with the union of the two collections.
2928
2931
2929 This is part of the mandatory API for smartset."""
2932 This is part of the mandatory API for smartset."""
2930 return addset(self, other)
2933 return addset(self, other)
2931
2934
    def __sub__(self, other):
        """Returns a new object with the subtraction of the two collections.

        This is part of the mandatory API for smartset."""
        c = other.__contains__
        return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
                           cache=False)
2939
2942
    def filter(self, condition, condrepr=None, cache=True):
        """Returns this smartset filtered by condition as a new smartset.

        `condition` is a callable which takes a revision number and returns a
        boolean. Optional `condrepr` provides a printable representation of
        the given `condition`.

        This is part of the mandatory API for smartset."""
        # builtins cannot be cached, but they do not need to be
        if cache and util.safehasattr(condition, 'func_code'):
            condition = util.cachefunc(condition)
        return filteredset(self, condition, condrepr)

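# Illustrative sketch, not Mercurial code: the smallest reasonable subclass
# of abstractsmartset wraps a sorted list and fills in the mandatory API
# described above; baseset (below) is the real, full-featured equivalent.
# The class name is invented for the example; sort() and reverse() are left
# to the base class, which raises NotImplementedError.
class _examplelistset(abstractsmartset):
    def __init__(self, revs):
        self._revs = sorted(set(revs))
    def __nonzero__(self):
        return bool(self._revs)
    def __contains__(self, rev):
        return rev in self._revs
    def __iter__(self):
        return iter(self._revs)
    def __len__(self):
        return len(self._revs)
    def isascending(self):
        return True
    def isdescending(self):
        return False
    def istopo(self):
        return False
    def first(self):
        return self._revs[0] if self._revs else None
    def last(self):
        return self._revs[-1] if self._revs else None
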
2953 class baseset(abstractsmartset):
2956 class baseset(abstractsmartset):
2954 """Basic data structure that represents a revset and contains the basic
2957 """Basic data structure that represents a revset and contains the basic
2955 operation that it should be able to perform.
2958 operation that it should be able to perform.
2956
2959
2957 Every method in this class should be implemented by any smartset class.
2960 Every method in this class should be implemented by any smartset class.
2958 """
2961 """
    def __init__(self, data=(), datarepr=None, istopo=False):
        """
        datarepr: a tuple of (format, obj, ...), a function or an object that
        provides a printable representation of the given data.
        """
        self._ascending = None
        self._istopo = istopo
        if not isinstance(data, list):
            if isinstance(data, set):
                self._set = data
                # a set has no order; we pick one for stability purposes
                self._ascending = True
            data = list(data)
        self._list = data
        self._datarepr = datarepr

2975 @util.propertycache
2978 @util.propertycache
2976 def _set(self):
2979 def _set(self):
2977 return set(self._list)
2980 return set(self._list)
2978
2981
2979 @util.propertycache
2982 @util.propertycache
2980 def _asclist(self):
2983 def _asclist(self):
2981 asclist = self._list[:]
2984 asclist = self._list[:]
2982 asclist.sort()
2985 asclist.sort()
2983 return asclist
2986 return asclist
2984
2987
2985 def __iter__(self):
2988 def __iter__(self):
2986 if self._ascending is None:
2989 if self._ascending is None:
2987 return iter(self._list)
2990 return iter(self._list)
2988 elif self._ascending:
2991 elif self._ascending:
2989 return iter(self._asclist)
2992 return iter(self._asclist)
2990 else:
2993 else:
2991 return reversed(self._asclist)
2994 return reversed(self._asclist)
2992
2995
2993 def fastasc(self):
2996 def fastasc(self):
2994 return iter(self._asclist)
2997 return iter(self._asclist)
2995
2998
2996 def fastdesc(self):
2999 def fastdesc(self):
2997 return reversed(self._asclist)
3000 return reversed(self._asclist)
2998
3001
2999 @util.propertycache
3002 @util.propertycache
3000 def __contains__(self):
3003 def __contains__(self):
3001 return self._set.__contains__
3004 return self._set.__contains__
3002
3005
3003 def __nonzero__(self):
3006 def __nonzero__(self):
3004 return bool(self._list)
3007 return bool(self._list)
3005
3008
3006 def sort(self, reverse=False):
3009 def sort(self, reverse=False):
3007 self._ascending = not bool(reverse)
3010 self._ascending = not bool(reverse)
3008 self._istopo = False
3011 self._istopo = False
3009
3012
3010 def reverse(self):
3013 def reverse(self):
3011 if self._ascending is None:
3014 if self._ascending is None:
3012 self._list.reverse()
3015 self._list.reverse()
3013 else:
3016 else:
3014 self._ascending = not self._ascending
3017 self._ascending = not self._ascending
3015 self._istopo = False
3018 self._istopo = False
3016
3019
3017 def __len__(self):
3020 def __len__(self):
3018 return len(self._list)
3021 return len(self._list)
3019
3022
    def isascending(self):
        """Returns True if the collection is in ascending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and self._ascending

    def isdescending(self):
        """Returns True if the collection is in descending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and not self._ascending

    def istopo(self):
        """Returns True if the collection is in topological order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._istopo

3044 def first(self):
3047 def first(self):
3045 if self:
3048 if self:
3046 if self._ascending is None:
3049 if self._ascending is None:
3047 return self._list[0]
3050 return self._list[0]
3048 elif self._ascending:
3051 elif self._ascending:
3049 return self._asclist[0]
3052 return self._asclist[0]
3050 else:
3053 else:
3051 return self._asclist[-1]
3054 return self._asclist[-1]
3052 return None
3055 return None
3053
3056
3054 def last(self):
3057 def last(self):
3055 if self:
3058 if self:
3056 if self._ascending is None:
3059 if self._ascending is None:
3057 return self._list[-1]
3060 return self._list[-1]
3058 elif self._ascending:
3061 elif self._ascending:
3059 return self._asclist[-1]
3062 return self._asclist[-1]
3060 else:
3063 else:
3061 return self._asclist[0]
3064 return self._asclist[0]
3062 return None
3065 return None
3063
3066
    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        s = _formatsetrepr(self._datarepr)
        if not s:
            l = self._list
            # if _list has been built from a set, it might have a different
            # order from one Python implementation to another.
            # We fall back to the sorted version for a stable output.
            if self._ascending is not None:
                l = self._asclist
            s = repr(l)
        return '<%s%s %s>' % (type(self).__name__, d, s)

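# Illustrative sketch: baseset keeps its construction order until sort() or
# reverse() pins an explicit direction. The revision numbers are made up.
bs = baseset([3, 1, 2])
assert list(bs) == [3, 1, 2]   # no direction yet: construction order
bs.sort()                      # ascending from now on
assert list(bs) == [1, 2, 3]
bs.reverse()
assert list(bs) == [3, 2, 1]
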
class filteredset(abstractsmartset):
    """Duck type for the baseset class which iterates lazily over the
    revisions in the subset and contains a function which tests for
    membership in the revset
    """
    def __init__(self, subset, condition=lambda x: True, condrepr=None):
        """
        condition: a function that decides whether a revision in the subset
        belongs to the revset or not.
        condrepr: a tuple of (format, obj, ...), a function or an object that
        provides a printable representation of the given condition.
        """
        self._subset = subset
        self._condition = condition
        self._condrepr = condrepr

3093 def __contains__(self, x):
3096 def __contains__(self, x):
3094 return x in self._subset and self._condition(x)
3097 return x in self._subset and self._condition(x)
3095
3098
3096 def __iter__(self):
3099 def __iter__(self):
3097 return self._iterfilter(self._subset)
3100 return self._iterfilter(self._subset)
3098
3101
3099 def _iterfilter(self, it):
3102 def _iterfilter(self, it):
3100 cond = self._condition
3103 cond = self._condition
3101 for x in it:
3104 for x in it:
3102 if cond(x):
3105 if cond(x):
3103 yield x
3106 yield x
3104
3107
3105 @property
3108 @property
3106 def fastasc(self):
3109 def fastasc(self):
3107 it = self._subset.fastasc
3110 it = self._subset.fastasc
3108 if it is None:
3111 if it is None:
3109 return None
3112 return None
3110 return lambda: self._iterfilter(it())
3113 return lambda: self._iterfilter(it())
3111
3114
3112 @property
3115 @property
3113 def fastdesc(self):
3116 def fastdesc(self):
3114 it = self._subset.fastdesc
3117 it = self._subset.fastdesc
3115 if it is None:
3118 if it is None:
3116 return None
3119 return None
3117 return lambda: self._iterfilter(it())
3120 return lambda: self._iterfilter(it())
3118
3121
3119 def __nonzero__(self):
3122 def __nonzero__(self):
3120 fast = None
3123 fast = None
3121 candidates = [self.fastasc if self.isascending() else None,
3124 candidates = [self.fastasc if self.isascending() else None,
3122 self.fastdesc if self.isdescending() else None,
3125 self.fastdesc if self.isdescending() else None,
3123 self.fastasc,
3126 self.fastasc,
3124 self.fastdesc]
3127 self.fastdesc]
3125 for candidate in candidates:
3128 for candidate in candidates:
3126 if candidate is not None:
3129 if candidate is not None:
3127 fast = candidate
3130 fast = candidate
3128 break
3131 break
3129
3132
3130 if fast is not None:
3133 if fast is not None:
3131 it = fast()
3134 it = fast()
3132 else:
3135 else:
3133 it = self
3136 it = self
3134
3137
3135 for r in it:
3138 for r in it:
3136 return True
3139 return True
3137 return False
3140 return False
3138
3141
3139 def __len__(self):
3142 def __len__(self):
3140 # Basic implementation to be changed in future patches.
3143 # Basic implementation to be changed in future patches.
3141 # until this gets improved, we use generator expression
3144 # until this gets improved, we use generator expression
3142 # here, since list compr is free to call __len__ again
3145 # here, since list compr is free to call __len__ again
3143 # causing infinite recursion
3146 # causing infinite recursion
3144 l = baseset(r for r in self)
3147 l = baseset(r for r in self)
3145 return len(l)
3148 return len(l)
3146
3149
3147 def sort(self, reverse=False):
3150 def sort(self, reverse=False):
3148 self._subset.sort(reverse=reverse)
3151 self._subset.sort(reverse=reverse)
3149
3152
3150 def reverse(self):
3153 def reverse(self):
3151 self._subset.reverse()
3154 self._subset.reverse()
3152
3155
3153 def isascending(self):
3156 def isascending(self):
3154 return self._subset.isascending()
3157 return self._subset.isascending()
3155
3158
3156 def isdescending(self):
3159 def isdescending(self):
3157 return self._subset.isdescending()
3160 return self._subset.isdescending()
3158
3161
3159 def istopo(self):
3162 def istopo(self):
3160 return self._subset.istopo()
3163 return self._subset.istopo()
3161
3164
3162 def first(self):
3165 def first(self):
3163 for x in self:
3166 for x in self:
3164 return x
3167 return x
3165 return None
3168 return None
3166
3169
3167 def last(self):
3170 def last(self):
3168 it = None
3171 it = None
3169 if self.isascending():
3172 if self.isascending():
3170 it = self.fastdesc
3173 it = self.fastdesc
3171 elif self.isdescending():
3174 elif self.isdescending():
3172 it = self.fastasc
3175 it = self.fastasc
3173 if it is not None:
3176 if it is not None:
3174 for x in it():
3177 for x in it():
3175 return x
3178 return x
3176 return None #empty case
3179 return None #empty case
3177 else:
3180 else:
3178 x = None
3181 x = None
3179 for x in self:
3182 for x in self:
3180 pass
3183 pass
3181 return x
3184 return x
3182
3185
3183 def __repr__(self):
3186 def __repr__(self):
3184 xs = [repr(self._subset)]
3187 xs = [repr(self._subset)]
3185 s = _formatsetrepr(self._condrepr)
3188 s = _formatsetrepr(self._condrepr)
3186 if s:
3189 if s:
3187 xs.append(s)
3190 xs.append(s)
3188 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3191 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3189
3192
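# Illustrative sketch: filteredset applies its condition lazily, so building
# one is cheap regardless of the size of the underlying set. The revision
# numbers and condition are made up.
evens = filteredset(baseset(range(10)), lambda r: r % 2 == 0,
                    condrepr='<even>')
assert list(evens) == [0, 2, 4, 6, 8]
assert 4 in evens and 5 not in evens
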
def _iterordered(ascending, iter1, iter2):
    """produce an ordered iteration from two iterators with the same order

    The 'ascending' argument is used to indicate the iteration direction.
    """
    choice = max
    if ascending:
        choice = min

    val1 = None
    val2 = None
    try:
        # Consume both iterators in an ordered way until one is empty
        while True:
            if val1 is None:
                val1 = next(iter1)
            if val2 is None:
                val2 = next(iter2)
            n = choice(val1, val2)
            yield n
            if val1 == n:
                val1 = None
            if val2 == n:
                val2 = None
    except StopIteration:
        # Flush any remaining values and consume the other one
        it = iter2
        if val1 is not None:
            yield val1
            it = iter1
        elif val2 is not None:
            # might have been equality and both are empty
            yield val2
        for val in it:
            yield val

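# Illustrative sketch: merging two ascending iterators while suppressing the
# value they share (3 below appears only once). The inputs are made up.
merged = list(_iterordered(True, iter([1, 3, 5]), iter([2, 3, 6])))
assert merged == [1, 2, 3, 5, 6]
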
3226 class addset(abstractsmartset):
3229 class addset(abstractsmartset):
3227 """Represent the addition of two sets
3230 """Represent the addition of two sets
3228
3231
3229 Wrapper structure for lazily adding two structures without losing much
3232 Wrapper structure for lazily adding two structures without losing much
3230 performance on the __contains__ method
3233 performance on the __contains__ method
3231
3234
3232 If the ascending attribute is set, that means the two structures are
3235 If the ascending attribute is set, that means the two structures are
3233 ordered in either an ascending or descending way. Therefore, we can add
3236 ordered in either an ascending or descending way. Therefore, we can add
3234 them maintaining the order by iterating over both at the same time
3237 them maintaining the order by iterating over both at the same time
3235
3238
3236 >>> xs = baseset([0, 3, 2])
3239 >>> xs = baseset([0, 3, 2])
3237 >>> ys = baseset([5, 2, 4])
3240 >>> ys = baseset([5, 2, 4])
3238
3241
3239 >>> rs = addset(xs, ys)
3242 >>> rs = addset(xs, ys)
3240 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3243 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3241 (True, True, False, True, 0, 4)
3244 (True, True, False, True, 0, 4)
3242 >>> rs = addset(xs, baseset([]))
3245 >>> rs = addset(xs, baseset([]))
    >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
    (True, True, False, 0, 2)
    >>> rs = addset(baseset([]), baseset([]))
    >>> bool(rs), 0 in rs, rs.first(), rs.last()
    (False, False, None, None)

    iterate unsorted:
    >>> rs = addset(xs, ys)
    >>> # (use generator because pypy could call len())
    >>> list(x for x in rs) # without _genlist
    [0, 3, 2, 5, 4]
    >>> assert not rs._genlist
    >>> len(rs)
    5
    >>> [x for x in rs] # with _genlist
    [0, 3, 2, 5, 4]
    >>> assert rs._genlist

    iterate ascending:
    >>> rs = addset(xs, ys, ascending=True)
    >>> # (use generator because pypy could call len())
    >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastasc()]
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert rs._asclist

    iterate descending:
    >>> rs = addset(xs, ys, ascending=False)
    >>> # (use generator because pypy could call len())
    >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastdesc()]
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert rs._asclist

    iterate ascending without fastasc:
    >>> rs = addset(xs, generatorset(ys), ascending=True)
    >>> assert rs.fastasc is None
    >>> [x for x in rs]
    [0, 2, 3, 4, 5]

    iterate descending without fastdesc:
    >>> rs = addset(generatorset(xs), ys, ascending=False)
    >>> assert rs.fastdesc is None
    >>> [x for x in rs]
    [5, 4, 3, 2, 0]
    """
    def __init__(self, revs1, revs2, ascending=None):
        self._r1 = revs1
        self._r2 = revs2
        self._iter = None
        self._ascending = ascending
        self._genlist = None
        self._asclist = None

    def __len__(self):
        return len(self._list)

    def __nonzero__(self):
        return bool(self._r1) or bool(self._r2)

    @util.propertycache
    def _list(self):
        if not self._genlist:
            self._genlist = baseset(iter(self))
        return self._genlist

    def __iter__(self):
        """Iterate over both collections without repeating elements

        If the ascending attribute is not set, iterate over the first one and
        then over the second one, checking for membership on the first one so
        we don't yield any duplicates.

        If the ascending attribute is set, iterate over both collections at
        the same time, yielding only one value at a time in the given order.
        """
        if self._ascending is None:
            if self._genlist:
                return iter(self._genlist)
            def arbitraryordergen():
                for r in self._r1:
                    yield r
                inr1 = self._r1.__contains__
                for r in self._r2:
                    if not inr1(r):
                        yield r
            return arbitraryordergen()
        # try to use our own fast iterator if it exists
        self._trysetasclist()
        if self._ascending:
            attr = 'fastasc'
        else:
            attr = 'fastdesc'
        it = getattr(self, attr)
        if it is not None:
            return it()
        # maybe only one of the two components supports fast iteration
        # get iterator for _r1
        iter1 = getattr(self._r1, attr)
        if iter1 is None:
            # let's avoid side effects (not sure it matters)
            iter1 = iter(sorted(self._r1, reverse=not self._ascending))
        else:
            iter1 = iter1()
        # get iterator for _r2
        iter2 = getattr(self._r2, attr)
        if iter2 is None:
            # let's avoid side effects (not sure it matters)
            iter2 = iter(sorted(self._r2, reverse=not self._ascending))
        else:
            iter2 = iter2()
        return _iterordered(self._ascending, iter1, iter2)

    def _trysetasclist(self):
        """populate the _asclist attribute if possible and necessary"""
        if self._genlist is not None and self._asclist is None:
            self._asclist = sorted(self._genlist)

    @property
    def fastasc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__iter__
        iter1 = self._r1.fastasc
        iter2 = self._r2.fastasc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(True, iter1(), iter2())

    @property
    def fastdesc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__reversed__
        iter1 = self._r1.fastdesc
        iter2 = self._r2.fastdesc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(False, iter1(), iter2())

    def __contains__(self, x):
        return x in self._r1 or x in self._r2

    def sort(self, reverse=False):
        """Sort the added set

        For this we use the cached list with all the generated values, and if
        we know whether they are ascending or descending we can sort them in
        a smart way.
        """
        self._ascending = not reverse

    def isascending(self):
        return self._ascending is not None and self._ascending

    def isdescending(self):
        return self._ascending is not None and not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topological order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def reverse(self):
        if self._ascending is None:
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        self.reverse()
        val = self.first()
        self.reverse()
        return val

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)

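# Illustrative sketch (not part of the original module): the ordered merge
# that addset.fastasc/fastdesc delegate to the _iterordered() helper used
# above boils down to a two-pointer walk over two sorted iterators that
# skips duplicates. '_sketch_mergeasc' is a hypothetical, simplified
# stand-in added here only to clarify the idea; the real _iterordered()
# also handles descending order.
def _sketch_mergeasc(iter1, iter2):
    """Yield elements of two ascending iterators in ascending order, once each."""
    val1 = next(iter1, None)
    val2 = next(iter2, None)
    while val1 is not None and val2 is not None:
        if val1 == val2:
            # common element: emit once, advance both sides
            yield val1
            val1 = next(iter1, None)
            val2 = next(iter2, None)
        elif val1 < val2:
            yield val1
            val1 = next(iter1, None)
        else:
            yield val2
            val2 = next(iter2, None)
    # at most one side still has elements; drain it
    if val1 is not None:
        yield val1
        for val1 in iter1:
            yield val1
    elif val2 is not None:
        yield val2
        for val2 in iter2:
            yield val2
# e.g. list(_sketch_mergeasc(iter([0, 3, 5]), iter([2, 3, 4]))) == [0, 2, 3, 4, 5]
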
class generatorset(abstractsmartset):
    """Wrap a generator for lazy iteration

    Wrapper structure for generators that provides lazy membership and can
    be iterated more than once.
    When asked for membership it generates values until either it finds the
    requested one or has gone through all the elements in the generator.
    """
    def __init__(self, gen, iterasc=None):
        """
        gen: a generator producing the values for the generatorset.
        """
        self._gen = gen
        self._asclist = None
        self._cache = {}
        self._genlist = []
        self._finished = False
        self._ascending = True
        if iterasc is not None:
            if iterasc:
                self.fastasc = self._iterator
                self.__contains__ = self._asccontains
            else:
                self.fastdesc = self._iterator
                self.__contains__ = self._desccontains

    def __nonzero__(self):
        # Do not use 'for r in self' because it will enforce the iteration
        # order (default ascending), possibly unrolling a whole descending
        # iterator.
        if self._genlist:
            return True
        for r in self._consumegen():
            return True
        return False

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def _asccontains(self, x):
        """version of contains optimised for ascending generators"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l > x:
                break

        self._cache[x] = False
        return False

    def _desccontains(self, x):
        """version of contains optimised for descending generators"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l < x:
                break

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator
        for x in self._consumegen():
            pass
        # recall the same code
        return iter(self)

    def _iterator(self):
        if self._finished:
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revisions
        # removed from _consumegen and added to genlist in another instance.
        #
        # Getting rid of it would provide about a 15% speedup on this
        # iteration.
        genlist = self._genlist
        nextrev = self._consumegen().next
        _len = len # cache global lookup
        def gen():
            i = 0
            while True:
                if i < _len(genlist):
                    yield genlist[i]
                else:
                    yield nextrev()
                i += 1
        return gen()

    def _consumegen(self):
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        if not self._finished:
            self._finished = True
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__

    def __len__(self):
        for x in self._consumegen():
            pass
        return len(self._genlist)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topological order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        return next(it(), None)

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.last()
        return next(it(), None)

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s>' % (type(self).__name__, d)

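# Illustrative sketch (not part of the original module): what the lazy
# membership of generatorset buys. '_sketch_lazymembership' is a hypothetical
# helper added only for illustration; it shows that a membership test stops
# consuming the wrapped generator as soon as the value is found, so later
# elements stay unevaluated until someone actually asks for them.
def _sketch_lazymembership():
    consumed = []
    def gen():
        for r in [0, 2, 4, 6]:
            consumed.append(r)
            yield r
    gs = generatorset(gen(), iterasc=True)
    found = 2 in gs        # stops at 2; 4 and 6 have not been generated yet
    return found, consumed # -> (True, [0, 2])
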
class spanset(abstractsmartset):
    """Duck type for baseset class which represents a range of revisions and
    can work lazily and without having all the range in memory

    Note that spanset(x, y) behaves almost like xrange(x, y) except for two
    notable points:
    - when x > y the set is automatically descending,
    - revisions filtered out by the current repoview are skipped.

    """
    def __init__(self, repo, start=0, end=None):
        """
        start: first revision included in the set
               (defaults to 0)
        end:   first revision excluded (last + 1)
               (defaults to len(repo))

        Spanset will be descending if `end` < `start`.
        """
        if end is None:
            end = len(repo)
        self._ascending = start <= end
        if not self._ascending:
            start, end = end + 1, start + 1
        self._start = start
        self._end = end
        self._hiddenrevs = repo.changelog.filteredrevs

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topological order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def _iterfilter(self, iterrange):
        s = self._hiddenrevs
        for r in iterrange:
            if r not in s:
                yield r

    def __iter__(self):
        if self._ascending:
            return self.fastasc()
        else:
            return self.fastdesc()

    def fastasc(self):
        iterrange = xrange(self._start, self._end)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def fastdesc(self):
        iterrange = xrange(self._end - 1, self._start - 1, -1)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def __contains__(self, rev):
        hidden = self._hiddenrevs
        return ((self._start <= rev < self._end)
                and not (hidden and rev in hidden))

    def __nonzero__(self):
        for r in self:
            return True
        return False

    def __len__(self):
        if not self._hiddenrevs:
            return abs(self._end - self._start)
        else:
            count = 0
            start = self._start
            end = self._end
            for rev in self._hiddenrevs:
                if (end < rev <= start) or (start <= rev < end):
                    count += 1
            return abs(self._end - self._start) - count

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        for x in it():
            return x
        return None

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        for x in it():
            return x
        return None

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s %d:%d>' % (type(self).__name__, d,
                                 self._start, self._end - 1)

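# Illustrative sketch (not part of the original module): how spanset maps a
# descending request onto a normalized half-open [start, end) pair, mirroring
# the arithmetic in spanset.__init__ above. '_sketch_spanbounds' is a
# hypothetical helper added only to clarify the swap; it takes the raw
# (start, end) arguments and returns (ascending, start, end) as the class
# would store them.
def _sketch_spanbounds(start, end):
    ascending = start <= end
    if not ascending:
        # e.g. 5, 2 -> iterate 5, 4, 3 -> stored as the half-open range [3, 6)
        start, end = end + 1, start + 1
    return ascending, start, end
# e.g. _sketch_spanbounds(0, 4) == (True, 0, 4)
#      _sketch_spanbounds(5, 2) == (False, 3, 6)
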
class fullreposet(spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization and magic to handle virtual
    revisions such as "null".
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, all of the other set should also
        be in self. Therefore `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        # other is not a smartset, make it so
        if not util.safehasattr(other, 'isascending'):
            # filter out hidden revisions
            # (this boldly assumes all smartsets are pure)
            #
            # `other` was used with "&", let's assume this is a set-like object.
            other = baseset(other - self._hiddenrevs)

        # XXX As fullreposet is also used as bootstrap, this is wrong.
        #
        # With a giveme312() revset returning [3,1,2], this makes
        #   'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
        # We cannot just drop it because other usages still need to sort it:
        #   'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
        #
        # There are also some faulty revset implementations that rely on it
        # (eg: children as of its state in e8075329c5fb)
        #
        # When we fix the two points above we can move this into the if clause
        other.sort(reverse=self.isdescending())
        return other

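# Illustrative sketch (not part of the original module): the point of the XXX
# comment in fullreposet.__and__ above. Intersecting with the full repo keeps
# the other set's elements but currently re-sorts them, so an explicitly
# ordered revset loses its order. '_sketch_fullreposet_and' is a hypothetical,
# repo-free stand-in that reproduces just that behaviour on plain iterables.
def _sketch_fullreposet_and(other, descending=False):
    # wrap plain containers the way __and__ does (hidden-rev filtering omitted)
    if not util.safehasattr(other, 'isascending'):
        other = baseset(other)
    # the unconditional sort below is what the XXX comment above is about:
    # it discards any ordering the right-hand revset asked for
    other.sort(reverse=descending)
    return other
# e.g. a revset that yielded 3, 1, 2 comes back as 1, 2, 3 here.
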
def prettyformatset(revs):
    lines = []
    rs = repr(revs)
    p = 0
    while p < len(rs):
        q = rs.find('<', p + 1)
        if q < 0:
            q = len(rs)
        l = rs.count('<', 0, p) - rs.count('>', 0, p)
        assert l >= 0
        lines.append((l, rs[p:q].rstrip()))
        p = q
    return '\n'.join(' ' * l + s for l, s in lines)

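# Illustrative note (not part of the original module): prettyformatset() turns
# the one-line nested reprs produced by the smartset classes above into an
# indented tree by splitting at every '<' and indenting each chunk by its
# nesting depth. For example, any object whose repr() is
#   "<addset <baseset [1, 2]>, <baseset [3]>>"
# is rendered as:
#   <addset
#    <baseset [1, 2]>,
#    <baseset [3]>>
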
def loadpredicate(ui, extname, registrarobj):
    """Load revset predicates from specified registrarobj
    """
    for name, func in registrarobj._table.iteritems():
        symbols[name] = func
        if func._safe:
            safesymbols.add(name)

# load built-in predicates explicitly to setup safesymbols
loadpredicate(None, None, predicate)

# tell hggettext to extract docstrings from these functions:
i18nfunctions = symbols.values()
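
# Illustrative sketch (not part of the original module): how loadpredicate()
# is reached for third-party predicates. A hypothetical extension registers a
# predicate through a registrar object, and the extension loader then hands
# that object to loadpredicate() so the new symbol lands in 'symbols' (and,
# when flagged safe, in 'safesymbols'). Roughly:
#
#   revsetpredicate = registrar.revsetpredicate()
#
#   @revsetpredicate('examplepredicate(set)', safe=True)
#   def examplepredicate(repo, subset, x):
#       """Hypothetical predicate; returns its input subset unchanged."""
#       return subset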