registrar: define revsetpredicate to decorate revset predicate...
FUJIWARA Katsunori
r28393:ac11ba7c default
@@ -1,207 +1,240 @@
1 1 # registrar.py - utilities to register function for specific purpose
2 2 #
3 3 # Copyright FUJIWARA Katsunori <foozy@lares.dti.ne.jp> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from . import (
11 11 util,
12 12 )
13 13
14 14 class funcregistrar(object):
15 15 """Base of decorator to register a fuction for specific purpose
16 16
17 17 The simplest derived class can be defined by overriding 'table' and
18 18 'formatdoc', for example::
19 19
20 20 symbols = {}
21 21 class keyword(funcregistrar):
22 22 table = symbols
23 23 formatdoc = ":%s: %s"
24 24
25 25 @keyword('bar')
26 26 def barfunc(*args, **kwargs):
27 27 '''Explanation of bar keyword ....
28 28 '''
29 29 pass
30 30
31 31 In this case:
32 32
33 33 - 'barfunc' is registered as 'bar' in 'symbols'
34 34 - online help uses ":bar: Explanation of bar keyword"
35 35 """
36 36
37 37 def __init__(self, decl):
38 38 """'decl' is a name or more descriptive string of a function
39 39
40 40 Specification of 'decl' depends on registration purpose.
41 41 """
42 42 self.decl = decl
43 43
44 44 table = None
45 45
46 46 def __call__(self, func):
47 47 """Execute actual registration for specified function
48 48 """
49 49 name = self.getname()
50 50
51 51 if func.__doc__ and not util.safehasattr(func, '_origdoc'):
52 52 doc = func.__doc__.strip()
53 53 func._origdoc = doc
54 54 if callable(self.formatdoc):
55 55 func.__doc__ = self.formatdoc(doc)
56 56 else:
57 57 # convenient shortcut for simple format
58 58 func.__doc__ = self.formatdoc % (self.decl, doc)
59 59
60 60 self.table[name] = func
61 61 self.extraaction(name, func)
62 62
63 63 return func
64 64
65 65 def getname(self):
66 66 """Return the name of the registered function from self.decl
67 67
68 68 Derived classes should override this if they allow a more
69 69 descriptive 'decl' string than just a name.
70 70 """
71 71 return self.decl
72 72
73 73 def parsefuncdecl(self):
74 74 """Parse function declaration and return the name of function in it
75 75 """
76 76 i = self.decl.find('(')
77 77 if i > 0:
78 78 return self.decl[:i]
79 79 else:
80 80 return self.decl
81 81
82 82 def formatdoc(self, doc):
83 83 """Return formatted document of the registered function for help
84 84
85 85 'doc' is '__doc__.strip()' of the registered function.
86 86
87 87 If this is overridden by a non-callable object in a derived class,
88 88 that value is treated as a format string and used to format the
89 89 document via 'self.formatdoc % (self.decl, doc)' for convenience.
90 90 """
91 91 raise NotImplementedError()
92 92
93 93 def extraaction(self, name, func):
94 94 """Execute exra action for registered function, if needed
95 95 """
96 96 pass
97 97
98 98 class delayregistrar(object):
99 99 """Decorator to delay actual registration until uisetup or so
100 100
101 101 For example, a decorator class that delays registration via the
102 102 'keyword' funcregistrar can be defined as below::
103 103
104 104 class extkeyword(delayregistrar):
105 105 registrar = keyword
106 106 """
107 107 def __init__(self):
108 108 self._list = []
109 109
110 110 registrar = None
111 111
112 112 def __call__(self, *args, **kwargs):
113 113 """Return the decorator to delay actual registration until setup
114 114 """
115 115 assert self.registrar is not None
116 116 def decorator(func):
117 117 # invocation of self.registrar() here can detect argument
118 118 # mismatching immediately
119 119 self._list.append((func, self.registrar(*args, **kwargs)))
120 120 return func
121 121 return decorator
122 122
123 123 def setup(self):
124 124 """Execute actual registration
125 125 """
126 126 while self._list:
127 127 func, decorator = self._list.pop(0)
128 128 decorator(func)
129 129
130 130 class _funcregistrarbase(object):
131 131 """Base of decorator to register a fuction for specific purpose
132 132
133 133 This decorator stores decorated functions in its own dict '_table'.
134 134
135 135 The simplest derived class can be defined by overriding '_docformat',
136 136 for example::
137 137
138 138 class keyword(_funcregistrarbase):
139 139 _docformat = ":%s: %s"
140 140
141 141 This should be used as below:
142 142
143 143 keyword = registrar.keyword()
144 144
145 145 @keyword('bar')
146 146 def barfunc(*args, **kwargs):
147 147 '''Explanation of bar keyword ....
148 148 '''
149 149 pass
150 150
151 151 In this case:
152 152
153 153 - 'barfunc' is stored as 'bar' in '_table' of an instance 'keyword' above
154 154 - 'barfunc.__doc__' becomes ":bar: Explanation of bar keyword"
155 155 """
156 156 def __init__(self, table=None):
157 157 if table is None:
158 158 self._table = {}
159 159 else:
160 160 self._table = table
161 161
162 162 def __call__(self, decl, *args, **kwargs):
163 163 return lambda func: self._doregister(func, decl, *args, **kwargs)
164 164
165 165 def _doregister(self, func, decl, *args, **kwargs):
166 166 name = self._getname(decl)
167 167
168 168 if func.__doc__ and not util.safehasattr(func, '_origdoc'):
169 169 doc = func.__doc__.strip()
170 170 func._origdoc = doc
171 171 func.__doc__ = self._formatdoc(decl, doc)
172 172
173 173 self._table[name] = func
174 174 self._extrasetup(name, func, *args, **kwargs)
175 175
176 176 return func
177 177
178 178 def _parsefuncdecl(self, decl):
179 179 """Parse function declaration and return the name of function in it
180 180 """
181 181 i = decl.find('(')
182 182 if i >= 0:
183 183 return decl[:i]
184 184 else:
185 185 return decl
186 186
187 187 def _getname(self, decl):
188 188 """Return the name of the registered function from decl
189 189
190 190 Derived classes should override this if they allow a more
191 191 descriptive 'decl' string than just a name.
192 192 """
193 193 return decl
194 194
195 195 _docformat = None
196 196
197 197 def _formatdoc(self, decl, doc):
198 198 """Return formatted document of the registered function for help
199 199
200 200 'doc' is '__doc__.strip()' of the registered function.
201 201 """
202 202 return self._docformat % (decl, doc)
203 203
204 204 def _extrasetup(self, name, func):
205 205 """Execute exra setup for registered function, if needed
206 206 """
207 207 pass
208
209 class revsetpredicate(_funcregistrarbase):
210 """Decorator to register revset predicate
211
212 Usage::
213
214 revsetpredicate = registrar.revsetpredicate()
215
216 @revsetpredicate('mypredicate(arg1, arg2[, arg3])')
217 def mypredicatefunc(repo, subset, x):
218 '''Explanation of this revset predicate ....
219 '''
220 pass
221
222 The first string argument is also used in online help.
223
224 The optional argument 'safe' indicates whether a predicate is safe
225 against DoS attacks (False by default).
226
227 The 'revsetpredicate' instance in the example above can be used to
228 decorate multiple functions.
229
230 Decorated functions are registered automatically at extension
231 loading time, if an instance named 'revsetpredicate' is used for
232 decorating in the extension.
233
234 Otherwise, an explicit 'revset.loadpredicate()' call is needed.
235 """
236 _getname = _funcregistrarbase._parsefuncdecl
237 _docformat = "``%s``\n %s"
238
239 def _extrasetup(self, name, func, safe=False):
240 func._safe = safe
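For illustration, here is a minimal sketch of how an extension could use the decorator defined above. The module name, predicate name, and predicate body are hypothetical; only the registrar.revsetpredicate API shown in this hunk is taken from the change:

    # myext.py - hypothetical extension built on the new registrar API
    from __future__ import absolute_import
    from mercurial import registrar

    # an instance named 'revsetpredicate' is picked up automatically
    # when the extension is loaded (see the docstring above)
    revsetpredicate = registrar.revsetpredicate()

    @revsetpredicate('trivial(arg)', safe=True)
    def trivial(repo, subset, x):
        '''Shown in online help as "``trivial(arg)``" plus this text.'''
        # a real predicate would parse 'x' and narrow 'subset';
        # returning 'subset' unchanged keeps the sketch minimal
        return subset

With any other instance name, the docstring above says an explicit 'revset.loadpredicate()' call is needed; that function's signature is not part of this hunk.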
@@ -1,3632 +1,3640 @@
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11 import re
12 12
13 13 from .i18n import _
14 14 from . import (
15 15 destutil,
16 16 encoding,
17 17 error,
18 18 hbisect,
19 19 match as matchmod,
20 20 node,
21 21 obsolete as obsmod,
22 22 parser,
23 23 pathutil,
24 24 phases,
25 25 registrar,
26 26 repoview,
27 27 util,
28 28 )
29 29
30 30 def _revancestors(repo, revs, followfirst):
31 31 """Like revlog.ancestors(), but supports followfirst."""
32 32 if followfirst:
33 33 cut = 1
34 34 else:
35 35 cut = None
36 36 cl = repo.changelog
37 37
38 38 def iterate():
39 39 revs.sort(reverse=True)
40 40 irevs = iter(revs)
41 41 h = []
42 42
43 43 inputrev = next(irevs, None)
44 44 if inputrev is not None:
45 45 heapq.heappush(h, -inputrev)
46 46
47 47 seen = set()
48 48 while h:
49 49 current = -heapq.heappop(h)
50 50 if current == inputrev:
51 51 inputrev = next(irevs, None)
52 52 if inputrev is not None:
53 53 heapq.heappush(h, -inputrev)
54 54 if current not in seen:
55 55 seen.add(current)
56 56 yield current
57 57 for parent in cl.parentrevs(current)[:cut]:
58 58 if parent != node.nullrev:
59 59 heapq.heappush(h, -parent)
60 60
61 61 return generatorset(iterate(), iterasc=False)
62 62
63 63 def _revdescendants(repo, revs, followfirst):
64 64 """Like revlog.descendants() but supports followfirst."""
65 65 if followfirst:
66 66 cut = 1
67 67 else:
68 68 cut = None
69 69
70 70 def iterate():
71 71 cl = repo.changelog
72 72 # XXX this should be 'parentset.min()' assuming 'parentset' is a
73 73 # smartset (and if it is not, it should.)
74 74 first = min(revs)
75 75 nullrev = node.nullrev
76 76 if first == nullrev:
77 77 # Are there nodes with a null first parent and a non-null
78 78 # second one? Maybe. Do we care? Probably not.
79 79 for i in cl:
80 80 yield i
81 81 else:
82 82 seen = set(revs)
83 83 for i in cl.revs(first + 1):
84 84 for x in cl.parentrevs(i)[:cut]:
85 85 if x != nullrev and x in seen:
86 86 seen.add(i)
87 87 yield i
88 88 break
89 89
90 90 return generatorset(iterate(), iterasc=True)
91 91
92 92 def _reachablerootspure(repo, minroot, roots, heads, includepath):
93 93 """return (heads(::<roots> and ::<heads>))
94 94
95 95 If includepath is True, return (<roots>::<heads>)."""
96 96 if not roots:
97 97 return []
98 98 parentrevs = repo.changelog.parentrevs
99 99 roots = set(roots)
100 100 visit = list(heads)
101 101 reachable = set()
102 102 seen = {}
103 103 # prefetch all the things! (because python is slow)
104 104 reached = reachable.add
105 105 dovisit = visit.append
106 106 nextvisit = visit.pop
107 107 # open-code the post-order traversal due to the tiny size of
108 108 # sys.getrecursionlimit()
109 109 while visit:
110 110 rev = nextvisit()
111 111 if rev in roots:
112 112 reached(rev)
113 113 if not includepath:
114 114 continue
115 115 parents = parentrevs(rev)
116 116 seen[rev] = parents
117 117 for parent in parents:
118 118 if parent >= minroot and parent not in seen:
119 119 dovisit(parent)
120 120 if not reachable:
121 121 return baseset()
122 122 if not includepath:
123 123 return reachable
124 124 for rev in sorted(seen):
125 125 for parent in seen[rev]:
126 126 if parent in reachable:
127 127 reached(rev)
128 128 return reachable
129 129
130 130 def reachableroots(repo, roots, heads, includepath=False):
131 131 """return (heads(::<roots> and ::<heads>))
132 132
133 133 If includepath is True, return (<roots>::<heads>)."""
134 134 if not roots:
135 135 return baseset()
136 136 minroot = roots.min()
137 137 roots = list(roots)
138 138 heads = list(heads)
139 139 try:
140 140 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
141 141 except AttributeError:
142 142 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
143 143 revs = baseset(revs)
144 144 revs.sort()
145 145 return revs
146 146
147 147 elements = {
148 148 # token-type: binding-strength, primary, prefix, infix, suffix
149 149 "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
150 150 "##": (20, None, None, ("_concat", 20), None),
151 151 "~": (18, None, None, ("ancestor", 18), None),
152 152 "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
153 153 "-": (5, None, ("negate", 19), ("minus", 5), None),
154 154 "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
155 155 ("dagrangepost", 17)),
156 156 "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
157 157 ("dagrangepost", 17)),
158 158 ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
159 159 "not": (10, None, ("not", 10), None, None),
160 160 "!": (10, None, ("not", 10), None, None),
161 161 "and": (5, None, None, ("and", 5), None),
162 162 "&": (5, None, None, ("and", 5), None),
163 163 "%": (5, None, None, ("only", 5), ("onlypost", 5)),
164 164 "or": (4, None, None, ("or", 4), None),
165 165 "|": (4, None, None, ("or", 4), None),
166 166 "+": (4, None, None, ("or", 4), None),
167 167 "=": (3, None, None, ("keyvalue", 3), None),
168 168 ",": (2, None, None, ("list", 2), None),
169 169 ")": (0, None, None, None, None),
170 170 "symbol": (0, "symbol", None, None, None),
171 171 "string": (0, "string", None, None, None),
172 172 "end": (0, None, None, None, None),
173 173 }
174 174
175 175 keywords = set(['and', 'or', 'not'])
176 176
177 177 # default set of valid characters for the initial letter of symbols
178 178 _syminitletters = set(c for c in [chr(i) for i in xrange(256)]
179 179 if c.isalnum() or c in '._@' or ord(c) > 127)
180 180
181 181 # default set of valid characters for non-initial letters of symbols
182 182 _symletters = set(c for c in [chr(i) for i in xrange(256)]
183 183 if c.isalnum() or c in '-._/@' or ord(c) > 127)
184 184
185 185 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
186 186 '''
187 187 Parse a revset statement into a stream of tokens
188 188
189 189 ``syminitletters`` is the set of valid characters for the initial
190 190 letter of symbols.
191 191
192 192 By default, character ``c`` is recognized as valid for initial
193 193 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
194 194
195 195 ``symletters`` is the set of valid characters for non-initial
196 196 letters of symbols.
197 197
198 198 By default, character ``c`` is recognized as valid for non-initial
199 199 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
200 200
201 201 Check that @ is a valid unquoted token character (issue3686):
202 202 >>> list(tokenize("@::"))
203 203 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
204 204
205 205 '''
206 206 if syminitletters is None:
207 207 syminitletters = _syminitletters
208 208 if symletters is None:
209 209 symletters = _symletters
210 210
211 211 if program and lookup:
212 212 # attempt to parse old-style ranges first to deal with
213 213 # things like old-tag which contain query metacharacters
214 214 parts = program.split(':', 1)
215 215 if all(lookup(sym) for sym in parts if sym):
216 216 if parts[0]:
217 217 yield ('symbol', parts[0], 0)
218 218 if len(parts) > 1:
219 219 s = len(parts[0])
220 220 yield (':', None, s)
221 221 if parts[1]:
222 222 yield ('symbol', parts[1], s + 1)
223 223 yield ('end', None, len(program))
224 224 return
225 225
226 226 pos, l = 0, len(program)
227 227 while pos < l:
228 228 c = program[pos]
229 229 if c.isspace(): # skip inter-token whitespace
230 230 pass
231 231 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
232 232 yield ('::', None, pos)
233 233 pos += 1 # skip ahead
234 234 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
235 235 yield ('..', None, pos)
236 236 pos += 1 # skip ahead
237 237 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
238 238 yield ('##', None, pos)
239 239 pos += 1 # skip ahead
240 240 elif c in "():=,-|&+!~^%": # handle simple operators
241 241 yield (c, None, pos)
242 242 elif (c in '"\'' or c == 'r' and
243 243 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
244 244 if c == 'r':
245 245 pos += 1
246 246 c = program[pos]
247 247 decode = lambda x: x
248 248 else:
249 249 decode = parser.unescapestr
250 250 pos += 1
251 251 s = pos
252 252 while pos < l: # find closing quote
253 253 d = program[pos]
254 254 if d == '\\': # skip over escaped characters
255 255 pos += 2
256 256 continue
257 257 if d == c:
258 258 yield ('string', decode(program[s:pos]), s)
259 259 break
260 260 pos += 1
261 261 else:
262 262 raise error.ParseError(_("unterminated string"), s)
263 263 # gather up a symbol/keyword
264 264 elif c in syminitletters:
265 265 s = pos
266 266 pos += 1
267 267 while pos < l: # find end of symbol
268 268 d = program[pos]
269 269 if d not in symletters:
270 270 break
271 271 if d == '.' and program[pos - 1] == '.': # special case for ..
272 272 pos -= 1
273 273 break
274 274 pos += 1
275 275 sym = program[s:pos]
276 276 if sym in keywords: # operator keywords
277 277 yield (sym, None, s)
278 278 elif '-' in sym:
279 279 # some jerk gave us foo-bar-baz, try to check if it's a symbol
280 280 if lookup and lookup(sym):
281 281 # looks like a real symbol
282 282 yield ('symbol', sym, s)
283 283 else:
284 284 # looks like an expression
285 285 parts = sym.split('-')
286 286 for p in parts[:-1]:
287 287 if p: # possible consecutive -
288 288 yield ('symbol', p, s)
289 289 s += len(p)
290 290 yield ('-', None, pos)
291 291 s += 1
292 292 if parts[-1]: # possible trailing -
293 293 yield ('symbol', parts[-1], s)
294 294 else:
295 295 yield ('symbol', sym, s)
296 296 pos -= 1
297 297 else:
298 298 raise error.ParseError(_("syntax error in revset '%s'") %
299 299 program, pos)
300 300 pos += 1
301 301 yield ('end', None, pos)
302 302
303 303 def parseerrordetail(inst):
304 304 """Compose error message from specified ParseError object
305 305 """
306 306 if len(inst.args) > 1:
307 307 return _('at %s: %s') % (inst.args[1], inst.args[0])
308 308 else:
309 309 return inst.args[0]
310 310
311 311 # helpers
312 312
313 313 def getstring(x, err):
314 314 if x and (x[0] == 'string' or x[0] == 'symbol'):
315 315 return x[1]
316 316 raise error.ParseError(err)
317 317
318 318 def getlist(x):
319 319 if not x:
320 320 return []
321 321 if x[0] == 'list':
322 322 return list(x[1:])
323 323 return [x]
324 324
325 325 def getargs(x, min, max, err):
326 326 l = getlist(x)
327 327 if len(l) < min or (max >= 0 and len(l) > max):
328 328 raise error.ParseError(err)
329 329 return l
330 330
331 331 def getargsdict(x, funcname, keys):
332 332 return parser.buildargsdict(getlist(x), funcname, keys.split(),
333 333 keyvaluenode='keyvalue', keynode='symbol')
334 334
335 335 def isvalidsymbol(tree):
336 336 """Examine whether specified ``tree`` is valid ``symbol`` or not
337 337 """
338 338 return tree[0] == 'symbol' and len(tree) > 1
339 339
340 340 def getsymbol(tree):
341 341 """Get symbol name from valid ``symbol`` in ``tree``
342 342
343 343 This assumes that ``tree`` is already examined by ``isvalidsymbol``.
344 344 """
345 345 return tree[1]
346 346
347 347 def isvalidfunc(tree):
348 348 """Examine whether specified ``tree`` is valid ``func`` or not
349 349 """
350 350 return tree[0] == 'func' and len(tree) > 1 and isvalidsymbol(tree[1])
351 351
352 352 def getfuncname(tree):
353 353 """Get function name from valid ``func`` in ``tree``
354 354
355 355 This assumes that ``tree`` is already examined by ``isvalidfunc``.
356 356 """
357 357 return getsymbol(tree[1])
358 358
359 359 def getfuncargs(tree):
360 360 """Get list of function arguments from valid ``func`` in ``tree``
361 361
362 362 This assumes that ``tree`` is already examined by ``isvalidfunc``.
363 363 """
364 364 if len(tree) > 2:
365 365 return getlist(tree[2])
366 366 else:
367 367 return []
368 368
369 369 def getset(repo, subset, x):
370 370 if not x:
371 371 raise error.ParseError(_("missing argument"))
372 372 s = methods[x[0]](repo, subset, *x[1:])
373 373 if util.safehasattr(s, 'isascending'):
374 374 return s
375 375 if (repo.ui.configbool('devel', 'all-warnings')
376 376 or repo.ui.configbool('devel', 'old-revset')):
377 377 # else case should not happen, because all non-func are internal,
378 378 # ignoring for now.
379 379 if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
380 380 repo.ui.develwarn('revset "%s" use list instead of smartset, '
381 381 '(upgrade your code)' % x[1][1])
382 382 return baseset(s)
383 383
384 384 def _getrevsource(repo, r):
385 385 extra = repo[r].extra()
386 386 for label in ('source', 'transplant_source', 'rebase_source'):
387 387 if label in extra:
388 388 try:
389 389 return repo[extra[label]].rev()
390 390 except error.RepoLookupError:
391 391 pass
392 392 return None
393 393
394 394 # operator methods
395 395
396 396 def stringset(repo, subset, x):
397 397 x = repo[x].rev()
398 398 if (x in subset
399 399 or x == node.nullrev and isinstance(subset, fullreposet)):
400 400 return baseset([x])
401 401 return baseset()
402 402
403 403 def rangeset(repo, subset, x, y):
404 404 m = getset(repo, fullreposet(repo), x)
405 405 n = getset(repo, fullreposet(repo), y)
406 406
407 407 if not m or not n:
408 408 return baseset()
409 409 m, n = m.first(), n.last()
410 410
411 411 if m == n:
412 412 r = baseset([m])
413 413 elif n == node.wdirrev:
414 414 r = spanset(repo, m, len(repo)) + baseset([n])
415 415 elif m == node.wdirrev:
416 416 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
417 417 elif m < n:
418 418 r = spanset(repo, m, n + 1)
419 419 else:
420 420 r = spanset(repo, m, n - 1)
421 421 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
422 422 # necessary to ensure we preserve the order in subset.
423 423 #
424 424 # This has performance implication, carrying the sorting over when possible
425 425 # would be more efficient.
426 426 return r & subset
427 427
428 428 def dagrange(repo, subset, x, y):
429 429 r = fullreposet(repo)
430 430 xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
431 431 includepath=True)
432 432 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
433 433 # necessary to ensure we preserve the order in subset.
434 434 return xs & subset
435 435
436 436 def andset(repo, subset, x, y):
437 437 return getset(repo, getset(repo, subset, x), y)
438 438
439 439 def differenceset(repo, subset, x, y):
440 440 return getset(repo, subset, x) - getset(repo, subset, y)
441 441
442 442 def orset(repo, subset, *xs):
443 443 assert xs
444 444 if len(xs) == 1:
445 445 return getset(repo, subset, xs[0])
446 446 p = len(xs) // 2
447 447 a = orset(repo, subset, *xs[:p])
448 448 b = orset(repo, subset, *xs[p:])
449 449 return a + b
450 450
451 451 def notset(repo, subset, x):
452 452 return subset - getset(repo, subset, x)
453 453
454 454 def listset(repo, subset, *xs):
455 455 raise error.ParseError(_("can't use a list in this context"),
456 456 hint=_('see hg help "revsets.x or y"'))
457 457
458 458 def keyvaluepair(repo, subset, k, v):
459 459 raise error.ParseError(_("can't use a key-value pair in this context"))
460 460
461 461 def func(repo, subset, a, b):
462 462 if a[0] == 'symbol' and a[1] in symbols:
463 463 return symbols[a[1]](repo, subset, b)
464 464
465 465 keep = lambda fn: getattr(fn, '__doc__', None) is not None
466 466
467 467 syms = [s for (s, fn) in symbols.items() if keep(fn)]
468 468 raise error.UnknownIdentifier(a[1], syms)
469 469
470 470 # functions
471 471
472 472 # symbols are callables like:
473 473 # fn(repo, subset, x)
474 474 # with:
475 475 # repo - current repository instance
476 476 # subset - of revisions to be examined
477 477 # x - argument in tree form
478 478 symbols = {}
479 479
480 480 # symbols which can't be used for a DoS attack for any given input
481 481 # (e.g. those which accept regexes as plain strings shouldn't be included)
482 482 # functions that just return a lot of changesets (like all) don't count here
483 483 safesymbols = set()
484 484
485 485 class predicate(registrar.funcregistrar):
486 486 """Decorator to register revset predicate
487 487
488 488 Usage::
489 489
490 490 @predicate('mypredicate(arg1, arg2[, arg3])')
491 491 def mypredicatefunc(repo, subset, x):
492 492 '''Explanation of this revset predicate ....
493 493 '''
494 494 pass
495 495
496 496 The first string argument of the constructor is also used in
497 497 online help.
498 498
499 499 Use 'extpredicate' instead of this to register revset predicates in
500 500 extensions.
501 501 """
502 502 table = symbols
503 503 formatdoc = "``%s``\n %s"
504 504 getname = registrar.funcregistrar.parsefuncdecl
505 505
506 506 def __init__(self, decl, safe=False):
507 507 """'safe' indicates whether a predicate is safe for DoS attack
508 508 """
509 509 super(predicate, self).__init__(decl)
510 510 self.safe = safe
511 511
512 512 def extraaction(self, name, func):
513 513 if self.safe:
514 514 safesymbols.add(name)
515 515
516 516 class extpredicate(registrar.delayregistrar):
517 517 """Decorator to register revset predicate in extensions
518 518
519 519 Usage::
520 520
521 521 revsetpredicate = revset.extpredicate()
522 522
523 523 @revsetpredicate('mypredicate(arg1, arg2[, arg3])')
524 524 def mypredicatefunc(repo, subset, x):
525 525 '''Explanation of this revset predicate ....
526 526 '''
527 527 pass
528 528
529 529 def uisetup(ui):
530 530 revsetpredicate.setup()
531 531
532 532 The 'revsetpredicate' instance above can be used to decorate multiple
533 533 functions, and calling 'setup()' on it registers all such functions at
534 534 once.
535 535 """
536 536 registrar = predicate
537 537
538 538 @predicate('_destupdate')
539 539 def _destupdate(repo, subset, x):
540 540 # experimental revset for update destination
541 541 args = getargsdict(x, '_destupdate', 'clean check')
542 542 return subset & baseset([destutil.destupdate(repo, **args)[0]])
543 543
544 544 @predicate('_destmerge')
545 545 def _destmerge(repo, subset, x):
546 546 # experimental revset for merge destination
547 547 sourceset = None
548 548 if x is not None:
549 549 sourceset = getset(repo, fullreposet(repo), x)
550 550 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
551 551
552 552 @predicate('adds(pattern)', safe=True)
553 553 def adds(repo, subset, x):
554 554 """Changesets that add a file matching pattern.
555 555
556 556 The pattern without explicit kind like ``glob:`` is expected to be
557 557 relative to the current directory and match against a file or a
558 558 directory.
559 559 """
560 560 # i18n: "adds" is a keyword
561 561 pat = getstring(x, _("adds requires a pattern"))
562 562 return checkstatus(repo, subset, pat, 1)
563 563
564 564 @predicate('ancestor(*changeset)', safe=True)
565 565 def ancestor(repo, subset, x):
566 566 """A greatest common ancestor of the changesets.
567 567
568 568 Accepts 0 or more changesets.
569 569 Will return an empty list when passed no args.
570 570 The greatest common ancestor of a single changeset is that changeset.
571 571 """
572 572 # i18n: "ancestor" is a keyword
573 573 l = getlist(x)
574 574 rl = fullreposet(repo)
575 575 anc = None
576 576
577 577 # (getset(repo, rl, i) for i in l) generates a list of lists
578 578 for revs in (getset(repo, rl, i) for i in l):
579 579 for r in revs:
580 580 if anc is None:
581 581 anc = repo[r]
582 582 else:
583 583 anc = anc.ancestor(repo[r])
584 584
585 585 if anc is not None and anc.rev() in subset:
586 586 return baseset([anc.rev()])
587 587 return baseset()
588 588
589 589 def _ancestors(repo, subset, x, followfirst=False):
590 590 heads = getset(repo, fullreposet(repo), x)
591 591 if not heads:
592 592 return baseset()
593 593 s = _revancestors(repo, heads, followfirst)
594 594 return subset & s
595 595
596 596 @predicate('ancestors(set)', safe=True)
597 597 def ancestors(repo, subset, x):
598 598 """Changesets that are ancestors of a changeset in set.
599 599 """
600 600 return _ancestors(repo, subset, x)
601 601
602 602 @predicate('_firstancestors', safe=True)
603 603 def _firstancestors(repo, subset, x):
604 604 # ``_firstancestors(set)``
605 605 # Like ``ancestors(set)`` but follows only the first parents.
606 606 return _ancestors(repo, subset, x, followfirst=True)
607 607
608 608 def ancestorspec(repo, subset, x, n):
609 609 """``set~n``
610 610 Changesets that are the Nth ancestor (first parents only) of a changeset
611 611 in set.
612 612 """
613 613 try:
614 614 n = int(n[1])
615 615 except (TypeError, ValueError):
616 616 raise error.ParseError(_("~ expects a number"))
617 617 ps = set()
618 618 cl = repo.changelog
619 619 for r in getset(repo, fullreposet(repo), x):
620 620 for i in range(n):
621 621 r = cl.parentrevs(r)[0]
622 622 ps.add(r)
623 623 return subset & ps
624 624
625 625 @predicate('author(string)', safe=True)
626 626 def author(repo, subset, x):
627 627 """Alias for ``user(string)``.
628 628 """
629 629 # i18n: "author" is a keyword
630 630 n = encoding.lower(getstring(x, _("author requires a string")))
631 631 kind, pattern, matcher = _substringmatcher(n)
632 632 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
633 633
634 634 @predicate('bisect(string)', safe=True)
635 635 def bisect(repo, subset, x):
636 636 """Changesets marked in the specified bisect status:
637 637
638 638 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
639 639 - ``goods``, ``bads`` : csets topologically good/bad
640 640 - ``range`` : csets taking part in the bisection
641 641 - ``pruned`` : csets that are goods, bads or skipped
642 642 - ``untested`` : csets whose fate is yet unknown
643 643 - ``ignored`` : csets ignored due to DAG topology
644 644 - ``current`` : the cset currently being bisected
645 645 """
646 646 # i18n: "bisect" is a keyword
647 647 status = getstring(x, _("bisect requires a string")).lower()
648 648 state = set(hbisect.get(repo, status))
649 649 return subset & state
650 650
651 651 # Backward-compatibility
652 652 # - no help entry so that we do not advertise it any more
653 653 @predicate('bisected', safe=True)
654 654 def bisected(repo, subset, x):
655 655 return bisect(repo, subset, x)
656 656
657 657 @predicate('bookmark([name])', safe=True)
658 658 def bookmark(repo, subset, x):
659 659 """The named bookmark or all bookmarks.
660 660
661 661 If `name` starts with `re:`, the remainder of the name is treated as
662 662 a regular expression. To match a bookmark that actually starts with `re:`,
663 663 use the prefix `literal:`.
664 664 """
665 665 # i18n: "bookmark" is a keyword
666 666 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
667 667 if args:
668 668 bm = getstring(args[0],
669 669 # i18n: "bookmark" is a keyword
670 670 _('the argument to bookmark must be a string'))
671 671 kind, pattern, matcher = util.stringmatcher(bm)
672 672 bms = set()
673 673 if kind == 'literal':
674 674 bmrev = repo._bookmarks.get(pattern, None)
675 675 if not bmrev:
676 676 raise error.RepoLookupError(_("bookmark '%s' does not exist")
677 677 % pattern)
678 678 bms.add(repo[bmrev].rev())
679 679 else:
680 680 matchrevs = set()
681 681 for name, bmrev in repo._bookmarks.iteritems():
682 682 if matcher(name):
683 683 matchrevs.add(bmrev)
684 684 if not matchrevs:
685 685 raise error.RepoLookupError(_("no bookmarks exist"
686 686 " that match '%s'") % pattern)
687 687 for bmrev in matchrevs:
688 688 bms.add(repo[bmrev].rev())
689 689 else:
690 690 bms = set([repo[r].rev()
691 691 for r in repo._bookmarks.values()])
692 692 bms -= set([node.nullrev])
693 693 return subset & bms
694 694
695 695 @predicate('branch(string or set)', safe=True)
696 696 def branch(repo, subset, x):
697 697 """
698 698 All changesets belonging to the given branch or the branches of the given
699 699 changesets.
700 700
701 701 If `string` starts with `re:`, the remainder of the name is treated as
702 702 a regular expression. To match a branch that actually starts with `re:`,
703 703 use the prefix `literal:`.
704 704 """
705 705 getbi = repo.revbranchcache().branchinfo
706 706
707 707 try:
708 708 b = getstring(x, '')
709 709 except error.ParseError:
710 710 # not a string, but another revspec, e.g. tip()
711 711 pass
712 712 else:
713 713 kind, pattern, matcher = util.stringmatcher(b)
714 714 if kind == 'literal':
715 715 # note: falls through to the revspec case if no branch with
716 716 # this name exists and pattern kind is not specified explicitly
717 717 if pattern in repo.branchmap():
718 718 return subset.filter(lambda r: matcher(getbi(r)[0]))
719 719 if b.startswith('literal:'):
720 720 raise error.RepoLookupError(_("branch '%s' does not exist")
721 721 % pattern)
722 722 else:
723 723 return subset.filter(lambda r: matcher(getbi(r)[0]))
724 724
725 725 s = getset(repo, fullreposet(repo), x)
726 726 b = set()
727 727 for r in s:
728 728 b.add(getbi(r)[0])
729 729 c = s.__contains__
730 730 return subset.filter(lambda r: c(r) or getbi(r)[0] in b)
731 731
732 732 @predicate('bumped()', safe=True)
733 733 def bumped(repo, subset, x):
734 734 """Mutable changesets marked as successors of public changesets.
735 735
736 736 Only non-public and non-obsolete changesets can be `bumped`.
737 737 """
738 738 # i18n: "bumped" is a keyword
739 739 getargs(x, 0, 0, _("bumped takes no arguments"))
740 740 bumped = obsmod.getrevs(repo, 'bumped')
741 741 return subset & bumped
742 742
743 743 @predicate('bundle()', safe=True)
744 744 def bundle(repo, subset, x):
745 745 """Changesets in the bundle.
746 746
747 747 Bundle must be specified by the -R option."""
748 748
749 749 try:
750 750 bundlerevs = repo.changelog.bundlerevs
751 751 except AttributeError:
752 752 raise error.Abort(_("no bundle provided - specify with -R"))
753 753 return subset & bundlerevs
754 754
755 755 def checkstatus(repo, subset, pat, field):
756 756 hasset = matchmod.patkind(pat) == 'set'
757 757
758 758 mcache = [None]
759 759 def matches(x):
760 760 c = repo[x]
761 761 if not mcache[0] or hasset:
762 762 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
763 763 m = mcache[0]
764 764 fname = None
765 765 if not m.anypats() and len(m.files()) == 1:
766 766 fname = m.files()[0]
767 767 if fname is not None:
768 768 if fname not in c.files():
769 769 return False
770 770 else:
771 771 for f in c.files():
772 772 if m(f):
773 773 break
774 774 else:
775 775 return False
776 776 files = repo.status(c.p1().node(), c.node())[field]
777 777 if fname is not None:
778 778 if fname in files:
779 779 return True
780 780 else:
781 781 for f in files:
782 782 if m(f):
783 783 return True
784 784
785 785 return subset.filter(matches)
786 786
787 787 def _children(repo, narrow, parentset):
788 788 if not parentset:
789 789 return baseset()
790 790 cs = set()
791 791 pr = repo.changelog.parentrevs
792 792 minrev = parentset.min()
793 793 for r in narrow:
794 794 if r <= minrev:
795 795 continue
796 796 for p in pr(r):
797 797 if p in parentset:
798 798 cs.add(r)
799 799 # XXX using a set to feed the baseset is wrong. Sets are not ordered.
800 800 # This does not break because of other fullreposet misbehavior.
801 801 return baseset(cs)
802 802
803 803 @predicate('children(set)', safe=True)
804 804 def children(repo, subset, x):
805 805 """Child changesets of changesets in set.
806 806 """
807 807 s = getset(repo, fullreposet(repo), x)
808 808 cs = _children(repo, subset, s)
809 809 return subset & cs
810 810
811 811 @predicate('closed()', safe=True)
812 812 def closed(repo, subset, x):
813 813 """Changeset is closed.
814 814 """
815 815 # i18n: "closed" is a keyword
816 816 getargs(x, 0, 0, _("closed takes no arguments"))
817 817 return subset.filter(lambda r: repo[r].closesbranch())
818 818
819 819 @predicate('contains(pattern)')
820 820 def contains(repo, subset, x):
821 821 """The revision's manifest contains a file matching pattern (but might not
822 822 modify it). See :hg:`help patterns` for information about file patterns.
823 823
824 824 The pattern without explicit kind like ``glob:`` is expected to be
825 825 relative to the current directory and match against a file exactly
826 826 for efficiency.
827 827 """
828 828 # i18n: "contains" is a keyword
829 829 pat = getstring(x, _("contains requires a pattern"))
830 830
831 831 def matches(x):
832 832 if not matchmod.patkind(pat):
833 833 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
834 834 if pats in repo[x]:
835 835 return True
836 836 else:
837 837 c = repo[x]
838 838 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
839 839 for f in c.manifest():
840 840 if m(f):
841 841 return True
842 842 return False
843 843
844 844 return subset.filter(matches)
845 845
846 846 @predicate('converted([id])', safe=True)
847 847 def converted(repo, subset, x):
848 848 """Changesets converted from the given identifier in the old repository if
849 849 present, or all converted changesets if no identifier is specified.
850 850 """
851 851
852 852 # There is exactly no chance of resolving the revision, so do a simple
853 853 # string compare and hope for the best
854 854
855 855 rev = None
856 856 # i18n: "converted" is a keyword
857 857 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
858 858 if l:
859 859 # i18n: "converted" is a keyword
860 860 rev = getstring(l[0], _('converted requires a revision'))
861 861
862 862 def _matchvalue(r):
863 863 source = repo[r].extra().get('convert_revision', None)
864 864 return source is not None and (rev is None or source.startswith(rev))
865 865
866 866 return subset.filter(lambda r: _matchvalue(r))
867 867
868 868 @predicate('date(interval)', safe=True)
869 869 def date(repo, subset, x):
870 870 """Changesets within the interval, see :hg:`help dates`.
871 871 """
872 872 # i18n: "date" is a keyword
873 873 ds = getstring(x, _("date requires a string"))
874 874 dm = util.matchdate(ds)
875 875 return subset.filter(lambda x: dm(repo[x].date()[0]))
876 876
877 877 @predicate('desc(string)', safe=True)
878 878 def desc(repo, subset, x):
879 879 """Search commit message for string. The match is case-insensitive.
880 880 """
881 881 # i18n: "desc" is a keyword
882 882 ds = encoding.lower(getstring(x, _("desc requires a string")))
883 883
884 884 def matches(x):
885 885 c = repo[x]
886 886 return ds in encoding.lower(c.description())
887 887
888 888 return subset.filter(matches)
889 889
890 890 def _descendants(repo, subset, x, followfirst=False):
891 891 roots = getset(repo, fullreposet(repo), x)
892 892 if not roots:
893 893 return baseset()
894 894 s = _revdescendants(repo, roots, followfirst)
895 895
896 896 # Both sets need to be ascending in order to lazily return the union
897 897 # in the correct order.
898 898 base = subset & roots
899 899 desc = subset & s
900 900 result = base + desc
901 901 if subset.isascending():
902 902 result.sort()
903 903 elif subset.isdescending():
904 904 result.sort(reverse=True)
905 905 else:
906 906 result = subset & result
907 907 return result
908 908
909 909 @predicate('descendants(set)', safe=True)
910 910 def descendants(repo, subset, x):
911 911 """Changesets which are descendants of changesets in set.
912 912 """
913 913 return _descendants(repo, subset, x)
914 914
915 915 @predicate('_firstdescendants', safe=True)
916 916 def _firstdescendants(repo, subset, x):
917 917 # ``_firstdescendants(set)``
918 918 # Like ``descendants(set)`` but follows only the first parents.
919 919 return _descendants(repo, subset, x, followfirst=True)
920 920
921 921 @predicate('destination([set])', safe=True)
922 922 def destination(repo, subset, x):
923 923 """Changesets that were created by a graft, transplant or rebase operation,
924 924 with the given revisions specified as the source. Omitting the optional set
925 925 is the same as passing all().
926 926 """
927 927 if x is not None:
928 928 sources = getset(repo, fullreposet(repo), x)
929 929 else:
930 930 sources = fullreposet(repo)
931 931
932 932 dests = set()
933 933
934 934 # subset contains all of the possible destinations that can be returned, so
935 935 # iterate over them and see if their source(s) were provided in the arg set.
936 936 # Even if the immediate src of r is not in the arg set, src's source (or
937 937 # further back) may be. Scanning back further than the immediate src allows
938 938 # transitive transplants and rebases to yield the same results as transitive
939 939 # grafts.
940 940 for r in subset:
941 941 src = _getrevsource(repo, r)
942 942 lineage = None
943 943
944 944 while src is not None:
945 945 if lineage is None:
946 946 lineage = list()
947 947
948 948 lineage.append(r)
949 949
950 950 # The visited lineage is a match if the current source is in the arg
951 951 # set. Since every candidate dest is visited by way of iterating
952 952 # subset, any dests further back in the lineage will be tested by a
953 953 # different iteration over subset. Likewise, if the src was already
954 954 # selected, the current lineage can be selected without going back
955 955 # further.
956 956 if src in sources or src in dests:
957 957 dests.update(lineage)
958 958 break
959 959
960 960 r = src
961 961 src = _getrevsource(repo, r)
962 962
963 963 return subset.filter(dests.__contains__)
964 964
965 965 @predicate('divergent()', safe=True)
966 966 def divergent(repo, subset, x):
967 967 """
968 968 Final successors of changesets with an alternative set of final successors.
969 969 """
970 970 # i18n: "divergent" is a keyword
971 971 getargs(x, 0, 0, _("divergent takes no arguments"))
972 972 divergent = obsmod.getrevs(repo, 'divergent')
973 973 return subset & divergent
974 974
975 975 @predicate('extinct()', safe=True)
976 976 def extinct(repo, subset, x):
977 977 """Obsolete changesets with obsolete descendants only.
978 978 """
979 979 # i18n: "extinct" is a keyword
980 980 getargs(x, 0, 0, _("extinct takes no arguments"))
981 981 extincts = obsmod.getrevs(repo, 'extinct')
982 982 return subset & extincts
983 983
984 984 @predicate('extra(label, [value])', safe=True)
985 985 def extra(repo, subset, x):
986 986 """Changesets with the given label in the extra metadata, with the given
987 987 optional value.
988 988
989 989 If `value` starts with `re:`, the remainder of the value is treated as
990 990 a regular expression. To match a value that actually starts with `re:`,
991 991 use the prefix `literal:`.
992 992 """
993 993 args = getargsdict(x, 'extra', 'label value')
994 994 if 'label' not in args:
995 995 # i18n: "extra" is a keyword
996 996 raise error.ParseError(_('extra takes at least 1 argument'))
997 997 # i18n: "extra" is a keyword
998 998 label = getstring(args['label'], _('first argument to extra must be '
999 999 'a string'))
1000 1000 value = None
1001 1001
1002 1002 if 'value' in args:
1003 1003 # i18n: "extra" is a keyword
1004 1004 value = getstring(args['value'], _('second argument to extra must be '
1005 1005 'a string'))
1006 1006 kind, value, matcher = util.stringmatcher(value)
1007 1007
1008 1008 def _matchvalue(r):
1009 1009 extra = repo[r].extra()
1010 1010 return label in extra and (value is None or matcher(extra[label]))
1011 1011
1012 1012 return subset.filter(lambda r: _matchvalue(r))
1013 1013
1014 1014 @predicate('filelog(pattern)', safe=True)
1015 1015 def filelog(repo, subset, x):
1016 1016 """Changesets connected to the specified filelog.
1017 1017
1018 1018 For performance reasons, visits only revisions mentioned in the file-level
1019 1019 filelog, rather than filtering through all changesets (much faster, but
1020 1020 doesn't include deletes or duplicate changes). For a slower, more accurate
1021 1021 result, use ``file()``.
1022 1022
1023 1023 The pattern without explicit kind like ``glob:`` is expected to be
1024 1024 relative to the current directory and match against a file exactly
1025 1025 for efficiency.
1026 1026
1027 1027 If some linkrev points to revisions filtered by the current repoview, we'll
1028 1028 work around it to return a non-filtered value.
1029 1029 """
1030 1030
1031 1031 # i18n: "filelog" is a keyword
1032 1032 pat = getstring(x, _("filelog requires a pattern"))
1033 1033 s = set()
1034 1034 cl = repo.changelog
1035 1035
1036 1036 if not matchmod.patkind(pat):
1037 1037 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
1038 1038 files = [f]
1039 1039 else:
1040 1040 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
1041 1041 files = (f for f in repo[None] if m(f))
1042 1042
1043 1043 for f in files:
1044 1044 fl = repo.file(f)
1045 1045 known = {}
1046 1046 scanpos = 0
1047 1047 for fr in list(fl):
1048 1048 fn = fl.node(fr)
1049 1049 if fn in known:
1050 1050 s.add(known[fn])
1051 1051 continue
1052 1052
1053 1053 lr = fl.linkrev(fr)
1054 1054 if lr in cl:
1055 1055 s.add(lr)
1056 1056 elif scanpos is not None:
1057 1057 # lowest matching changeset is filtered, scan further
1058 1058 # ahead in changelog
1059 1059 start = max(lr, scanpos) + 1
1060 1060 scanpos = None
1061 1061 for r in cl.revs(start):
1062 1062 # minimize parsing of non-matching entries
1063 1063 if f in cl.revision(r) and f in cl.readfiles(r):
1064 1064 try:
1065 1065 # try to use manifest delta fastpath
1066 1066 n = repo[r].filenode(f)
1067 1067 if n not in known:
1068 1068 if n == fn:
1069 1069 s.add(r)
1070 1070 scanpos = r
1071 1071 break
1072 1072 else:
1073 1073 known[n] = r
1074 1074 except error.ManifestLookupError:
1075 1075 # deletion in changelog
1076 1076 continue
1077 1077
1078 1078 return subset & s
1079 1079
1080 1080 @predicate('first(set, [n])', safe=True)
1081 1081 def first(repo, subset, x):
1082 1082 """An alias for limit().
1083 1083 """
1084 1084 return limit(repo, subset, x)
1085 1085
1086 1086 def _follow(repo, subset, x, name, followfirst=False):
1087 1087 l = getargs(x, 0, 1, _("%s takes no arguments or a pattern") % name)
1088 1088 c = repo['.']
1089 1089 if l:
1090 1090 x = getstring(l[0], _("%s expected a pattern") % name)
1091 1091 matcher = matchmod.match(repo.root, repo.getcwd(), [x],
1092 1092 ctx=repo[None], default='path')
1093 1093
1094 1094 files = c.manifest().walk(matcher)
1095 1095
1096 1096 s = set()
1097 1097 for fname in files:
1098 1098 fctx = c[fname]
1099 1099 s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
1100 1100 # include the revision responsible for the most recent version
1101 1101 s.add(fctx.introrev())
1102 1102 else:
1103 1103 s = _revancestors(repo, baseset([c.rev()]), followfirst)
1104 1104
1105 1105 return subset & s
1106 1106
1107 1107 @predicate('follow([pattern])', safe=True)
1108 1108 def follow(repo, subset, x):
1109 1109 """
1110 1110 An alias for ``::.`` (ancestors of the working directory's first parent).
1111 1111 If pattern is specified, the histories of files matching the given
1112 1112 pattern are followed, including copies.
1113 1113 """
1114 1114 return _follow(repo, subset, x, 'follow')
1115 1115
1116 1116 @predicate('_followfirst', safe=True)
1117 1117 def _followfirst(repo, subset, x):
1118 1118 # ``followfirst([pattern])``
1119 1119 # Like ``follow([pattern])`` but follows only the first parent of
1120 1120 # every revision or file revision.
1121 1121 return _follow(repo, subset, x, '_followfirst', followfirst=True)
1122 1122
1123 1123 @predicate('all()', safe=True)
1124 1124 def getall(repo, subset, x):
1125 1125 """All changesets, the same as ``0:tip``.
1126 1126 """
1127 1127 # i18n: "all" is a keyword
1128 1128 getargs(x, 0, 0, _("all takes no arguments"))
1129 1129 return subset & spanset(repo) # drop "null" if any
1130 1130
1131 1131 @predicate('grep(regex)')
1132 1132 def grep(repo, subset, x):
1133 1133 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1134 1134 to ensure special escape characters are handled correctly. Unlike
1135 1135 ``keyword(string)``, the match is case-sensitive.
1136 1136 """
1137 1137 try:
1138 1138 # i18n: "grep" is a keyword
1139 1139 gr = re.compile(getstring(x, _("grep requires a string")))
1140 1140 except re.error as e:
1141 1141 raise error.ParseError(_('invalid match pattern: %s') % e)
1142 1142
1143 1143 def matches(x):
1144 1144 c = repo[x]
1145 1145 for e in c.files() + [c.user(), c.description()]:
1146 1146 if gr.search(e):
1147 1147 return True
1148 1148 return False
1149 1149
1150 1150 return subset.filter(matches)
1151 1151
1152 1152 @predicate('_matchfiles', safe=True)
1153 1153 def _matchfiles(repo, subset, x):
1154 1154 # _matchfiles takes a revset list of prefixed arguments:
1155 1155 #
1156 1156 # [p:foo, i:bar, x:baz]
1157 1157 #
1158 1158 # builds a match object from them and filters subset. Allowed
1159 1159 # prefixes are 'p:' for regular patterns, 'i:' for include
1160 1160 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1161 1161 # a revision identifier, or the empty string to reference the
1162 1162 # working directory, from which the match object is
1163 1163 # initialized. Use 'd:' to set the default matching mode, default
1164 1164 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1165 1165
1166 1166 l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
1167 1167 pats, inc, exc = [], [], []
1168 1168 rev, default = None, None
1169 1169 for arg in l:
1170 1170 s = getstring(arg, "_matchfiles requires string arguments")
1171 1171 prefix, value = s[:2], s[2:]
1172 1172 if prefix == 'p:':
1173 1173 pats.append(value)
1174 1174 elif prefix == 'i:':
1175 1175 inc.append(value)
1176 1176 elif prefix == 'x:':
1177 1177 exc.append(value)
1178 1178 elif prefix == 'r:':
1179 1179 if rev is not None:
1180 1180 raise error.ParseError('_matchfiles expected at most one '
1181 1181 'revision')
1182 1182 if value != '': # empty means working directory; leave rev as None
1183 1183 rev = value
1184 1184 elif prefix == 'd:':
1185 1185 if default is not None:
1186 1186 raise error.ParseError('_matchfiles expected at most one '
1187 1187 'default mode')
1188 1188 default = value
1189 1189 else:
1190 1190 raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
1191 1191 if not default:
1192 1192 default = 'glob'
1193 1193
1194 1194 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1195 1195 exclude=exc, ctx=repo[rev], default=default)
1196 1196
1197 1197 # This directly reads the changelog data, as creating a changectx for all
1198 1198 # revisions is quite expensive.
1199 1199 getfiles = repo.changelog.readfiles
1200 1200 wdirrev = node.wdirrev
1201 1201 def matches(x):
1202 1202 if x == wdirrev:
1203 1203 files = repo[x].files()
1204 1204 else:
1205 1205 files = getfiles(x)
1206 1206 for f in files:
1207 1207 if m(f):
1208 1208 return True
1209 1209 return False
1210 1210
1211 1211 return subset.filter(matches)
1212 1212
1213 1213 @predicate('file(pattern)', safe=True)
1214 1214 def hasfile(repo, subset, x):
1215 1215 """Changesets affecting files matched by pattern.
1216 1216
1217 1217 For a faster but less accurate result, consider using ``filelog()``
1218 1218 instead.
1219 1219
1220 1220 This predicate uses ``glob:`` as the default kind of pattern.
1221 1221 """
1222 1222 # i18n: "file" is a keyword
1223 1223 pat = getstring(x, _("file requires a pattern"))
1224 1224 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1225 1225
1226 1226 @predicate('head()', safe=True)
1227 1227 def head(repo, subset, x):
1228 1228 """Changeset is a named branch head.
1229 1229 """
1230 1230 # i18n: "head" is a keyword
1231 1231 getargs(x, 0, 0, _("head takes no arguments"))
1232 1232 hs = set()
1233 1233 cl = repo.changelog
1234 1234 for b, ls in repo.branchmap().iteritems():
1235 1235 hs.update(cl.rev(h) for h in ls)
1236 1236 # XXX using a set to feed the baseset is wrong. Sets are not ordered.
1237 1237 # This does not break because of other fullreposet misbehavior.
1238 1238 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
1239 1239 # necessary to ensure we preserve the order in subset.
1240 1240 return baseset(hs) & subset
1241 1241
1242 1242 @predicate('heads(set)', safe=True)
1243 1243 def heads(repo, subset, x):
1244 1244 """Members of set with no children in set.
1245 1245 """
1246 1246 s = getset(repo, subset, x)
1247 1247 ps = parents(repo, subset, x)
1248 1248 return s - ps
1249 1249
1250 1250 @predicate('hidden()', safe=True)
1251 1251 def hidden(repo, subset, x):
1252 1252 """Hidden changesets.
1253 1253 """
1254 1254 # i18n: "hidden" is a keyword
1255 1255 getargs(x, 0, 0, _("hidden takes no arguments"))
1256 1256 hiddenrevs = repoview.filterrevs(repo, 'visible')
1257 1257 return subset & hiddenrevs
1258 1258
1259 1259 @predicate('keyword(string)', safe=True)
1260 1260 def keyword(repo, subset, x):
1261 1261 """Search commit message, user name, and names of changed files for
1262 1262 string. The match is case-insensitive.
1263 1263 """
1264 1264 # i18n: "keyword" is a keyword
1265 1265 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1266 1266
1267 1267 def matches(r):
1268 1268 c = repo[r]
1269 1269 return any(kw in encoding.lower(t)
1270 1270 for t in c.files() + [c.user(), c.description()])
1271 1271
1272 1272 return subset.filter(matches)
1273 1273
1274 1274 @predicate('limit(set[, n[, offset]])', safe=True)
1275 1275 def limit(repo, subset, x):
1276 1276 """First n members of set, defaulting to 1, starting from offset.
1277 1277 """
1278 1278 args = getargsdict(x, 'limit', 'set n offset')
1279 1279 if 'set' not in args:
1280 1280 # i18n: "limit" is a keyword
1281 1281 raise error.ParseError(_("limit requires one to three arguments"))
1282 1282 try:
1283 1283 lim, ofs = 1, 0
1284 1284 if 'n' in args:
1285 1285 # i18n: "limit" is a keyword
1286 1286 lim = int(getstring(args['n'], _("limit requires a number")))
1287 1287 if 'offset' in args:
1288 1288 # i18n: "limit" is a keyword
1289 1289 ofs = int(getstring(args['offset'], _("limit requires a number")))
1290 1290 if ofs < 0:
1291 1291 raise error.ParseError(_("negative offset"))
1292 1292 except (TypeError, ValueError):
1293 1293 # i18n: "limit" is a keyword
1294 1294 raise error.ParseError(_("limit expects a number"))
1295 1295 os = getset(repo, fullreposet(repo), args['set'])
1296 1296 result = []
1297 1297 it = iter(os)
1298 1298 for x in xrange(ofs):
1299 1299 y = next(it, None)
1300 1300 if y is None:
1301 1301 break
1302 1302 for x in xrange(lim):
1303 1303 y = next(it, None)
1304 1304 if y is None:
1305 1305 break
1306 1306 elif y in subset:
1307 1307 result.append(y)
1308 1308 return baseset(result)
1309 1309
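As a side note on the slicing behaviour implemented by limit() above, here is a minimal standalone model (plain Python, no Mercurial objects; 'revs' stands in for the ordered input set and the 'y in subset' membership check is omitted):

    def limit_model(revs, lim=1, ofs=0):
        # skip the first 'ofs' members, then keep at most 'lim' of the rest
        it = iter(revs)
        for _ in xrange(ofs):
            if next(it, None) is None:
                return []
        result = []
        for _ in xrange(lim):
            y = next(it, None)
            if y is None:
                break
            result.append(y)
        return result

    # limit_model(range(10), lim=3, ofs=2) -> [2, 3, 4]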
1310 1310 @predicate('last(set, [n])', safe=True)
1311 1311 def last(repo, subset, x):
1312 1312 """Last n members of set, defaulting to 1.
1313 1313 """
1314 1314 # i18n: "last" is a keyword
1315 1315 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1316 1316 try:
1317 1317 lim = 1
1318 1318 if len(l) == 2:
1319 1319 # i18n: "last" is a keyword
1320 1320 lim = int(getstring(l[1], _("last requires a number")))
1321 1321 except (TypeError, ValueError):
1322 1322 # i18n: "last" is a keyword
1323 1323 raise error.ParseError(_("last expects a number"))
1324 1324 os = getset(repo, fullreposet(repo), l[0])
1325 1325 os.reverse()
1326 1326 result = []
1327 1327 it = iter(os)
1328 1328 for x in xrange(lim):
1329 1329 y = next(it, None)
1330 1330 if y is None:
1331 1331 break
1332 1332 elif y in subset:
1333 1333 result.append(y)
1334 1334 return baseset(result)
1335 1335
1336 1336 @predicate('max(set)', safe=True)
1337 1337 def maxrev(repo, subset, x):
1338 1338 """Changeset with highest revision number in set.
1339 1339 """
1340 1340 os = getset(repo, fullreposet(repo), x)
1341 1341 try:
1342 1342 m = os.max()
1343 1343 if m in subset:
1344 1344 return baseset([m])
1345 1345 except ValueError:
1346 1346 # os.max() throws a ValueError when the collection is empty.
1347 1347 # Same as python's max().
1348 1348 pass
1349 1349 return baseset()
1350 1350
1351 1351 @predicate('merge()', safe=True)
1352 1352 def merge(repo, subset, x):
1353 1353 """Changeset is a merge changeset.
1354 1354 """
1355 1355 # i18n: "merge" is a keyword
1356 1356 getargs(x, 0, 0, _("merge takes no arguments"))
1357 1357 cl = repo.changelog
1358 1358 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1)
1359 1359
1360 1360 @predicate('branchpoint()', safe=True)
1361 1361 def branchpoint(repo, subset, x):
1362 1362 """Changesets with more than one child.
1363 1363 """
1364 1364 # i18n: "branchpoint" is a keyword
1365 1365 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1366 1366 cl = repo.changelog
1367 1367 if not subset:
1368 1368 return baseset()
1369 1369 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1370 1370 # (and if it is not, it should.)
1371 1371 baserev = min(subset)
1372 1372 parentscount = [0]*(len(repo) - baserev)
1373 1373 for r in cl.revs(start=baserev + 1):
1374 1374 for p in cl.parentrevs(r):
1375 1375 if p >= baserev:
1376 1376 parentscount[p - baserev] += 1
1377 1377 return subset.filter(lambda r: parentscount[r - baserev] > 1)
1378 1378
1379 1379 @predicate('min(set)', safe=True)
1380 1380 def minrev(repo, subset, x):
1381 1381 """Changeset with lowest revision number in set.
1382 1382 """
1383 1383 os = getset(repo, fullreposet(repo), x)
1384 1384 try:
1385 1385 m = os.min()
1386 1386 if m in subset:
1387 1387 return baseset([m])
1388 1388 except ValueError:
1389 1389 # os.min() throws a ValueError when the collection is empty.
1390 1390 # Same as python's min().
1391 1391 pass
1392 1392 return baseset()
1393 1393
1394 1394 @predicate('modifies(pattern)', safe=True)
1395 1395 def modifies(repo, subset, x):
1396 1396 """Changesets modifying files matched by pattern.
1397 1397
1398 1398 The pattern without explicit kind like ``glob:`` is expected to be
1399 1399 relative to the current directory and match against a file or a
1400 1400 directory.
1401 1401 """
1402 1402 # i18n: "modifies" is a keyword
1403 1403 pat = getstring(x, _("modifies requires a pattern"))
1404 1404 return checkstatus(repo, subset, pat, 0)
1405 1405
1406 1406 @predicate('named(namespace)')
1407 1407 def named(repo, subset, x):
1408 1408 """The changesets in a given namespace.
1409 1409
1410 1410 If `namespace` starts with `re:`, the remainder of the string is treated as
1411 1411 a regular expression. To match a namespace that actually starts with `re:`,
1412 1412 use the prefix `literal:`.
1413 1413 """
1414 1414 # i18n: "named" is a keyword
1415 1415 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1416 1416
1417 1417 ns = getstring(args[0],
1418 1418 # i18n: "named" is a keyword
1419 1419 _('the argument to named must be a string'))
1420 1420 kind, pattern, matcher = util.stringmatcher(ns)
1421 1421 namespaces = set()
1422 1422 if kind == 'literal':
1423 1423 if pattern not in repo.names:
1424 1424 raise error.RepoLookupError(_("namespace '%s' does not exist")
1425 1425 % ns)
1426 1426 namespaces.add(repo.names[pattern])
1427 1427 else:
1428 1428 for name, ns in repo.names.iteritems():
1429 1429 if matcher(name):
1430 1430 namespaces.add(ns)
1431 1431 if not namespaces:
1432 1432 raise error.RepoLookupError(_("no namespace exists"
1433 1433 " that match '%s'") % pattern)
1434 1434
1435 1435 names = set()
1436 1436 for ns in namespaces:
1437 1437 for name in ns.listnames(repo):
1438 1438 if name not in ns.deprecated:
1439 1439 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1440 1440
1441 1441 names -= set([node.nullrev])
1442 1442 return subset & names
1443 1443
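# Illustrative sketch, not part of the original source: driving the 'named'
# predicate above through the localrepository.revs() API. The helper name is
# made up; 'tags' and 'bookmarks' are the built-in namespaces.
def _example_named_usage(repo):
    # literal namespace name
    bookmarked = repo.revs("named('bookmarks')")
    # 're:' treats the rest as a regular expression over namespace names;
    # a namespace that really starts with 're:' would need 'literal:'
    tagged_or_bookmarked = repo.revs("named('re:(tags|bookmarks)')")
    return bookmarked, tagged_or_bookmarked
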
1444 1444 @predicate('id(string)', safe=True)
1445 1445 def node_(repo, subset, x):
1446 1446 """Revision non-ambiguously specified by the given hex string prefix.
1447 1447 """
1448 1448 # i18n: "id" is a keyword
1449 1449 l = getargs(x, 1, 1, _("id requires one argument"))
1450 1450 # i18n: "id" is a keyword
1451 1451 n = getstring(l[0], _("id requires a string"))
1452 1452 if len(n) == 40:
1453 1453 try:
1454 1454 rn = repo.changelog.rev(node.bin(n))
1455 1455 except (LookupError, TypeError):
1456 1456 rn = None
1457 1457 else:
1458 1458 rn = None
1459 1459 pm = repo.changelog._partialmatch(n)
1460 1460 if pm is not None:
1461 1461 rn = repo.changelog.rev(pm)
1462 1462
1463 1463 if rn is None:
1464 1464 return baseset()
1465 1465 result = baseset([rn])
1466 1466 return result & subset
1467 1467
1468 1468 @predicate('obsolete()', safe=True)
1469 1469 def obsolete(repo, subset, x):
1470 1470 """Mutable changeset with a newer version."""
1471 1471 # i18n: "obsolete" is a keyword
1472 1472 getargs(x, 0, 0, _("obsolete takes no arguments"))
1473 1473 obsoletes = obsmod.getrevs(repo, 'obsolete')
1474 1474 return subset & obsoletes
1475 1475
1476 1476 @predicate('only(set, [set])', safe=True)
1477 1477 def only(repo, subset, x):
1478 1478 """Changesets that are ancestors of the first set that are not ancestors
1479 1479 of any other head in the repo. If a second set is specified, the result
1480 1480 is ancestors of the first set that are not ancestors of the second set
1481 1481 (i.e. ::<set1> - ::<set2>).
1482 1482 """
1483 1483 cl = repo.changelog
1484 1484 # i18n: "only" is a keyword
1485 1485 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1486 1486 include = getset(repo, fullreposet(repo), args[0])
1487 1487 if len(args) == 1:
1488 1488 if not include:
1489 1489 return baseset()
1490 1490
1491 1491 descendants = set(_revdescendants(repo, include, False))
1492 1492 exclude = [rev for rev in cl.headrevs()
1493 1493 if rev not in descendants and rev not in include]
1494 1494 else:
1495 1495 exclude = getset(repo, fullreposet(repo), args[1])
1496 1496
1497 1497 results = set(cl.findmissingrevs(common=exclude, heads=include))
1498 1498 # XXX we should turn this into a baseset instead of a set; smartset may be
1499 1499 # able to apply some optimisations if this were a baseset.
1500 1500 return subset & results
1501 1501
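# Illustrative sketch, not part of the original source: the two-argument form
# of 'only' above is the '::<set1> - ::<set2>' difference described in its
# docstring; 'tip' and '.' are arbitrary example revisions.
def _example_only_usage(repo):
    a = repo.revs('only(tip, .)')
    b = repo.revs('::tip - ::.')
    return a, b  # both select the same revisions
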
1502 1502 @predicate('origin([set])', safe=True)
1503 1503 def origin(repo, subset, x):
1504 1504 """
1505 1505 Changesets that were specified as a source for the grafts, transplants or
1506 1506 rebases that created the given revisions. Omitting the optional set is the
1507 1507 same as passing all(). If a changeset created by these operations is itself
1508 1508 specified as a source for one of these operations, only the source changeset
1509 1509 for the first operation is selected.
1510 1510 """
1511 1511 if x is not None:
1512 1512 dests = getset(repo, fullreposet(repo), x)
1513 1513 else:
1514 1514 dests = fullreposet(repo)
1515 1515
1516 1516 def _firstsrc(rev):
1517 1517 src = _getrevsource(repo, rev)
1518 1518 if src is None:
1519 1519 return None
1520 1520
1521 1521 while True:
1522 1522 prev = _getrevsource(repo, src)
1523 1523
1524 1524 if prev is None:
1525 1525 return src
1526 1526 src = prev
1527 1527
1528 1528 o = set([_firstsrc(r) for r in dests])
1529 1529 o -= set([None])
1530 1530 # XXX we should turn this into a baseset instead of a set; smartset may be
1531 1531 # able to apply some optimisations if this were a baseset.
1532 1532 return subset & o
1533 1533
1534 1534 @predicate('outgoing([path])', safe=True)
1535 1535 def outgoing(repo, subset, x):
1536 1536 """Changesets not found in the specified destination repository, or the
1537 1537 default push location.
1538 1538 """
1539 1539 # Avoid cycles.
1540 1540 from . import (
1541 1541 discovery,
1542 1542 hg,
1543 1543 )
1544 1544 # i18n: "outgoing" is a keyword
1545 1545 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1546 1546 # i18n: "outgoing" is a keyword
1547 1547 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1548 1548 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1549 1549 dest, branches = hg.parseurl(dest)
1550 1550 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1551 1551 if revs:
1552 1552 revs = [repo.lookup(rev) for rev in revs]
1553 1553 other = hg.peer(repo, {}, dest)
1554 1554 repo.ui.pushbuffer()
1555 1555 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1556 1556 repo.ui.popbuffer()
1557 1557 cl = repo.changelog
1558 1558 o = set([cl.rev(r) for r in outgoing.missing])
1559 1559 return subset & o
1560 1560
1561 1561 @predicate('p1([set])', safe=True)
1562 1562 def p1(repo, subset, x):
1563 1563 """First parent of changesets in set, or the working directory.
1564 1564 """
1565 1565 if x is None:
1566 1566 p = repo[x].p1().rev()
1567 1567 if p >= 0:
1568 1568 return subset & baseset([p])
1569 1569 return baseset()
1570 1570
1571 1571 ps = set()
1572 1572 cl = repo.changelog
1573 1573 for r in getset(repo, fullreposet(repo), x):
1574 1574 ps.add(cl.parentrevs(r)[0])
1575 1575 ps -= set([node.nullrev])
1576 1576 # XXX we should turn this into a baseset instead of a set; smartset may be
1577 1577 # able to apply some optimisations if this were a baseset.
1578 1578 return subset & ps
1579 1579
1580 1580 @predicate('p2([set])', safe=True)
1581 1581 def p2(repo, subset, x):
1582 1582 """Second parent of changesets in set, or the working directory.
1583 1583 """
1584 1584 if x is None:
1585 1585 ps = repo[x].parents()
1586 1586 try:
1587 1587 p = ps[1].rev()
1588 1588 if p >= 0:
1589 1589 return subset & baseset([p])
1590 1590 return baseset()
1591 1591 except IndexError:
1592 1592 return baseset()
1593 1593
1594 1594 ps = set()
1595 1595 cl = repo.changelog
1596 1596 for r in getset(repo, fullreposet(repo), x):
1597 1597 ps.add(cl.parentrevs(r)[1])
1598 1598 ps -= set([node.nullrev])
1599 1599 # XXX we should turn this into a baseset instead of a set; smartset may be
1600 1600 # able to apply some optimisations if this were a baseset.
1601 1601 return subset & ps
1602 1602
1603 1603 @predicate('parents([set])', safe=True)
1604 1604 def parents(repo, subset, x):
1605 1605 """
1606 1606 The set of all parents for all changesets in set, or the working directory.
1607 1607 """
1608 1608 if x is None:
1609 1609 ps = set(p.rev() for p in repo[x].parents())
1610 1610 else:
1611 1611 ps = set()
1612 1612 cl = repo.changelog
1613 1613 up = ps.update
1614 1614 parentrevs = cl.parentrevs
1615 1615 for r in getset(repo, fullreposet(repo), x):
1616 1616 if r == node.wdirrev:
1617 1617 up(p.rev() for p in repo[r].parents())
1618 1618 else:
1619 1619 up(parentrevs(r))
1620 1620 ps -= set([node.nullrev])
1621 1621 return subset & ps
1622 1622
1623 1623 def _phase(repo, subset, target):
1624 1624 """helper to select all rev in phase <target>"""
1625 1625 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1626 1626 if repo._phasecache._phasesets:
1627 1627 s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
1628 1628 s = baseset(s)
1629 1629 s.sort() # sets are unordered, so we enforce ascending order
1630 1630 return subset & s
1631 1631 else:
1632 1632 phase = repo._phasecache.phase
1633 1633 condition = lambda r: phase(repo, r) == target
1634 1634 return subset.filter(condition, cache=False)
1635 1635
1636 1636 @predicate('draft()', safe=True)
1637 1637 def draft(repo, subset, x):
1638 1638 """Changeset in draft phase."""
1639 1639 # i18n: "draft" is a keyword
1640 1640 getargs(x, 0, 0, _("draft takes no arguments"))
1641 1641 target = phases.draft
1642 1642 return _phase(repo, subset, target)
1643 1643
1644 1644 @predicate('secret()', safe=True)
1645 1645 def secret(repo, subset, x):
1646 1646 """Changeset in secret phase."""
1647 1647 # i18n: "secret" is a keyword
1648 1648 getargs(x, 0, 0, _("secret takes no arguments"))
1649 1649 target = phases.secret
1650 1650 return _phase(repo, subset, target)
1651 1651
1652 1652 def parentspec(repo, subset, x, n):
1653 1653 """``set^0``
1654 1654 The set.
1655 1655 ``set^1`` (or ``set^``), ``set^2``
1656 1656 First or second parent, respectively, of all changesets in set.
1657 1657 """
1658 1658 try:
1659 1659 n = int(n[1])
1660 1660 if n not in (0, 1, 2):
1661 1661 raise ValueError
1662 1662 except (TypeError, ValueError):
1663 1663 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1664 1664 ps = set()
1665 1665 cl = repo.changelog
1666 1666 for r in getset(repo, fullreposet(repo), x):
1667 1667 if n == 0:
1668 1668 ps.add(r)
1669 1669 elif n == 1:
1670 1670 ps.add(cl.parentrevs(r)[0])
1671 1671 elif n == 2:
1672 1672 parents = cl.parentrevs(r)
1673 1673 if len(parents) > 1:
1674 1674 ps.add(parents[1])
1675 1675 return subset & ps
1676 1676
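# Illustrative sketch, not part of the original source: the '^' suffix handled
# by parentspec() above, exercised through repo.revs(); '.' is just an example
# starting point.
def _example_parentspec_usage(repo):
    itself = repo.revs('.^0')          # the revision itself
    first_parent = repo.revs('.^1')    # same as '.^'
    second_parent = repo.revs('.^2')   # empty unless '.' is a merge
    return itself, first_parent, second_parent
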
1677 1677 @predicate('present(set)', safe=True)
1678 1678 def present(repo, subset, x):
1679 1679 """An empty set, if any revision in set isn't found; otherwise,
1680 1680 all revisions in set.
1681 1681
1682 1682 If any of the specified revisions is not present in the local repository,
1683 1683 the query is normally aborted. But this predicate allows the query
1684 1684 to continue even in such cases.
1685 1685 """
1686 1686 try:
1687 1687 return getset(repo, subset, x)
1688 1688 except error.RepoLookupError:
1689 1689 return baseset()
1690 1690
1691 1691 # for internal use
1692 1692 @predicate('_notpublic', safe=True)
1693 1693 def _notpublic(repo, subset, x):
1694 1694 getargs(x, 0, 0, "_notpublic takes no arguments")
1695 1695 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1696 1696 if repo._phasecache._phasesets:
1697 1697 s = set()
1698 1698 for u in repo._phasecache._phasesets[1:]:
1699 1699 s.update(u)
1700 1700 s = baseset(s - repo.changelog.filteredrevs)
1701 1701 s.sort()
1702 1702 return subset & s
1703 1703 else:
1704 1704 phase = repo._phasecache.phase
1705 1705 target = phases.public
1706 1706 condition = lambda r: phase(repo, r) != target
1707 1707 return subset.filter(condition, cache=False)
1708 1708
1709 1709 @predicate('public()', safe=True)
1710 1710 def public(repo, subset, x):
1711 1711 """Changeset in public phase."""
1712 1712 # i18n: "public" is a keyword
1713 1713 getargs(x, 0, 0, _("public takes no arguments"))
1714 1714 phase = repo._phasecache.phase
1715 1715 target = phases.public
1716 1716 condition = lambda r: phase(repo, r) == target
1717 1717 return subset.filter(condition, cache=False)
1718 1718
1719 1719 @predicate('remote([id [,path]])', safe=True)
1720 1720 def remote(repo, subset, x):
1721 1721 """Local revision that corresponds to the given identifier in a
1722 1722 remote repository, if present. Here, the '.' identifier is a
1723 1723 synonym for the current local branch.
1724 1724 """
1725 1725
1726 1726 from . import hg # avoid start-up nasties
1727 1727 # i18n: "remote" is a keyword
1728 1728 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1729 1729
1730 1730 q = '.'
1731 1731 if len(l) > 0:
1732 1732 # i18n: "remote" is a keyword
1733 1733 q = getstring(l[0], _("remote requires a string id"))
1734 1734 if q == '.':
1735 1735 q = repo['.'].branch()
1736 1736
1737 1737 dest = ''
1738 1738 if len(l) > 1:
1739 1739 # i18n: "remote" is a keyword
1740 1740 dest = getstring(l[1], _("remote requires a repository path"))
1741 1741 dest = repo.ui.expandpath(dest or 'default')
1742 1742 dest, branches = hg.parseurl(dest)
1743 1743 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1744 1744 if revs:
1745 1745 revs = [repo.lookup(rev) for rev in revs]
1746 1746 other = hg.peer(repo, {}, dest)
1747 1747 n = other.lookup(q)
1748 1748 if n in repo:
1749 1749 r = repo[n].rev()
1750 1750 if r in subset:
1751 1751 return baseset([r])
1752 1752 return baseset()
1753 1753
1754 1754 @predicate('removes(pattern)', safe=True)
1755 1755 def removes(repo, subset, x):
1756 1756 """Changesets which remove files matching pattern.
1757 1757
1758 1758 The pattern without explicit kind like ``glob:`` is expected to be
1759 1759 relative to the current directory and match against a file or a
1760 1760 directory.
1761 1761 """
1762 1762 # i18n: "removes" is a keyword
1763 1763 pat = getstring(x, _("removes requires a pattern"))
1764 1764 return checkstatus(repo, subset, pat, 2)
1765 1765
1766 1766 @predicate('rev(number)', safe=True)
1767 1767 def rev(repo, subset, x):
1768 1768 """Revision with the given numeric identifier.
1769 1769 """
1770 1770 # i18n: "rev" is a keyword
1771 1771 l = getargs(x, 1, 1, _("rev requires one argument"))
1772 1772 try:
1773 1773 # i18n: "rev" is a keyword
1774 1774 l = int(getstring(l[0], _("rev requires a number")))
1775 1775 except (TypeError, ValueError):
1776 1776 # i18n: "rev" is a keyword
1777 1777 raise error.ParseError(_("rev expects a number"))
1778 1778 if l not in repo.changelog and l != node.nullrev:
1779 1779 return baseset()
1780 1780 return subset & baseset([l])
1781 1781
1782 1782 @predicate('matching(revision [, field])', safe=True)
1783 1783 def matching(repo, subset, x):
1784 1784 """Changesets in which a given set of fields match the set of fields in the
1785 1785 selected revision or set.
1786 1786
1787 1787 To match more than one field pass the list of fields to match separated
1788 1788 by spaces (e.g. ``author description``).
1789 1789
1790 1790 Valid fields are most regular revision fields and some special fields.
1791 1791
1792 1792 Regular revision fields are ``description``, ``author``, ``branch``,
1793 1793 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1794 1794 and ``diff``.
1795 1795 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1796 1796 contents of the revision. Two revisions matching their ``diff`` will
1797 1797 also match their ``files``.
1798 1798
1799 1799 Special fields are ``summary`` and ``metadata``:
1800 1800 ``summary`` matches the first line of the description.
1801 1801 ``metadata`` is equivalent to matching ``description user date``
1802 1802 (i.e. it matches the main metadata fields).
1803 1803
1804 1804 ``metadata`` is the default field which is used when no fields are
1805 1805 specified. You can match more than one field at a time.
1806 1806 """
1807 1807 # i18n: "matching" is a keyword
1808 1808 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1809 1809
1810 1810 revs = getset(repo, fullreposet(repo), l[0])
1811 1811
1812 1812 fieldlist = ['metadata']
1813 1813 if len(l) > 1:
1814 1814 fieldlist = getstring(l[1],
1815 1815 # i18n: "matching" is a keyword
1816 1816 _("matching requires a string "
1817 1817 "as its second argument")).split()
1818 1818
1819 1819 # Make sure that there are no repeated fields,
1820 1820 # expand the 'special' 'metadata' field type
1821 1821 # and check the 'files' whenever we check the 'diff'
1822 1822 fields = []
1823 1823 for field in fieldlist:
1824 1824 if field == 'metadata':
1825 1825 fields += ['user', 'description', 'date']
1826 1826 elif field == 'diff':
1827 1827 # a revision matching the diff must also match the files
1828 1828 # since matching the diff is very costly, make sure to
1829 1829 # also match the files first
1830 1830 fields += ['files', 'diff']
1831 1831 else:
1832 1832 if field == 'author':
1833 1833 field = 'user'
1834 1834 fields.append(field)
1835 1835 fields = set(fields)
1836 1836 if 'summary' in fields and 'description' in fields:
1837 1837 # If a revision matches its description it also matches its summary
1838 1838 fields.discard('summary')
1839 1839
1840 1840 # We may want to match more than one field
1841 1841 # Not all fields take the same amount of time to be matched
1842 1842 # Sort the selected fields in order of increasing matching cost
1843 1843 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1844 1844 'files', 'description', 'substate', 'diff']
1845 1845 def fieldkeyfunc(f):
1846 1846 try:
1847 1847 return fieldorder.index(f)
1848 1848 except ValueError:
1849 1849 # assume an unknown field is very costly
1850 1850 return len(fieldorder)
1851 1851 fields = list(fields)
1852 1852 fields.sort(key=fieldkeyfunc)
1853 1853
1854 1854 # Each field will be matched with its own "getfield" function
1855 1855 # which will be added to the getfieldfuncs array of functions
1856 1856 getfieldfuncs = []
1857 1857 _funcs = {
1858 1858 'user': lambda r: repo[r].user(),
1859 1859 'branch': lambda r: repo[r].branch(),
1860 1860 'date': lambda r: repo[r].date(),
1861 1861 'description': lambda r: repo[r].description(),
1862 1862 'files': lambda r: repo[r].files(),
1863 1863 'parents': lambda r: repo[r].parents(),
1864 1864 'phase': lambda r: repo[r].phase(),
1865 1865 'substate': lambda r: repo[r].substate,
1866 1866 'summary': lambda r: repo[r].description().splitlines()[0],
1867 1867 'diff': lambda r: list(repo[r].diff(git=True)),
1868 1868 }
1869 1869 for info in fields:
1870 1870 getfield = _funcs.get(info, None)
1871 1871 if getfield is None:
1872 1872 raise error.ParseError(
1873 1873 # i18n: "matching" is a keyword
1874 1874 _("unexpected field name passed to matching: %s") % info)
1875 1875 getfieldfuncs.append(getfield)
1876 1876 # convert the getfield array of functions into a "getinfo" function
1877 1877 # which returns an array of field values (or a single value if there
1878 1878 # is only one field to match)
1879 1879 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1880 1880
1881 1881 def matches(x):
1882 1882 for rev in revs:
1883 1883 target = getinfo(rev)
1884 1884 match = True
1885 1885 for n, f in enumerate(getfieldfuncs):
1886 1886 if target[n] != f(x):
1887 1887 match = False
1888 1888 if match:
1889 1889 return True
1890 1890 return False
1891 1891
1892 1892 return subset.filter(matches)
1893 1893
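# Illustrative sketch, not part of the original source: using the 'matching'
# predicate above via repo.revs(); 'tip' and the field lists are arbitrary
# examples taken from its docstring.
def _example_matching_usage(repo):
    # no field list: the default 'metadata' == 'description user date'
    same_metadata = repo.revs('matching(tip)')
    # several space-separated fields; 'author' is accepted as 'user'
    same_author_branch = repo.revs("matching(tip, 'author branch')")
    return same_metadata, same_author_branch
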
1894 1894 @predicate('reverse(set)', safe=True)
1895 1895 def reverse(repo, subset, x):
1896 1896 """Reverse order of set.
1897 1897 """
1898 1898 l = getset(repo, subset, x)
1899 1899 l.reverse()
1900 1900 return l
1901 1901
1902 1902 @predicate('roots(set)', safe=True)
1903 1903 def roots(repo, subset, x):
1904 1904 """Changesets in set with no parent changeset in set.
1905 1905 """
1906 1906 s = getset(repo, fullreposet(repo), x)
1907 1907 parents = repo.changelog.parentrevs
1908 1908 def filter(r):
1909 1909 for p in parents(r):
1910 1910 if 0 <= p and p in s:
1911 1911 return False
1912 1912 return True
1913 1913 return subset & s.filter(filter)
1914 1914
1915 1915 @predicate('sort(set[, [-]key...])', safe=True)
1916 1916 def sort(repo, subset, x):
1917 1917 """Sort set by keys. The default sort order is ascending, specify a key
1918 1918 as ``-key`` to sort in descending order.
1919 1919
1920 1920 The keys can be:
1921 1921
1922 1922 - ``rev`` for the revision number,
1923 1923 - ``branch`` for the branch name,
1924 1924 - ``desc`` for the commit message (description),
1925 1925 - ``user`` for user name (``author`` can be used as an alias),
1926 1926 - ``date`` for the commit date
1927 1927 """
1928 1928 # i18n: "sort" is a keyword
1929 1929 l = getargs(x, 1, 2, _("sort requires one or two arguments"))
1930 1930 keys = "rev"
1931 1931 if len(l) == 2:
1932 1932 # i18n: "sort" is a keyword
1933 1933 keys = getstring(l[1], _("sort spec must be a string"))
1934 1934
1935 1935 s = l[0]
1936 1936 keys = keys.split()
1937 1937 l = []
1938 1938 def invert(s):
1939 1939 return "".join(chr(255 - ord(c)) for c in s)
1940 1940 revs = getset(repo, subset, s)
1941 1941 if keys == ["rev"]:
1942 1942 revs.sort()
1943 1943 return revs
1944 1944 elif keys == ["-rev"]:
1945 1945 revs.sort(reverse=True)
1946 1946 return revs
1947 1947 for r in revs:
1948 1948 c = repo[r]
1949 1949 e = []
1950 1950 for k in keys:
1951 1951 if k == 'rev':
1952 1952 e.append(r)
1953 1953 elif k == '-rev':
1954 1954 e.append(-r)
1955 1955 elif k == 'branch':
1956 1956 e.append(c.branch())
1957 1957 elif k == '-branch':
1958 1958 e.append(invert(c.branch()))
1959 1959 elif k == 'desc':
1960 1960 e.append(c.description())
1961 1961 elif k == '-desc':
1962 1962 e.append(invert(c.description()))
1963 1963 elif k in 'user author':
1964 1964 e.append(c.user())
1965 1965 elif k in '-user -author':
1966 1966 e.append(invert(c.user()))
1967 1967 elif k == 'date':
1968 1968 e.append(c.date()[0])
1969 1969 elif k == '-date':
1970 1970 e.append(-c.date()[0])
1971 1971 else:
1972 1972 raise error.ParseError(_("unknown sort key %r") % k)
1973 1973 e.append(r)
1974 1974 l.append(e)
1975 1975 l.sort()
1976 1976 return baseset([e[-1] for e in l])
1977 1977
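# Illustrative sketch, not part of the original source: a multi-key 'sort'
# spec as described in the docstring above; all() is the standard predicate
# selecting every visible revision.
def _example_sort_usage(repo):
    # newest commit date first, ties broken by ascending user name
    return repo.revs("sort(all(), '-date user')")
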
1978 1978 @predicate('subrepo([pattern])')
1979 1979 def subrepo(repo, subset, x):
1980 1980 """Changesets that add, modify or remove the given subrepo. If no subrepo
1981 1981 pattern is named, any subrepo changes are returned.
1982 1982 """
1983 1983 # i18n: "subrepo" is a keyword
1984 1984 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
1985 1985 pat = None
1986 1986 if len(args) != 0:
1987 1987 pat = getstring(args[0], _("subrepo requires a pattern"))
1988 1988
1989 1989 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
1990 1990
1991 1991 def submatches(names):
1992 1992 k, p, m = util.stringmatcher(pat)
1993 1993 for name in names:
1994 1994 if m(name):
1995 1995 yield name
1996 1996
1997 1997 def matches(x):
1998 1998 c = repo[x]
1999 1999 s = repo.status(c.p1().node(), c.node(), match=m)
2000 2000
2001 2001 if pat is None:
2002 2002 return s.added or s.modified or s.removed
2003 2003
2004 2004 if s.added:
2005 2005 return any(submatches(c.substate.keys()))
2006 2006
2007 2007 if s.modified:
2008 2008 subs = set(c.p1().substate.keys())
2009 2009 subs.update(c.substate.keys())
2010 2010
2011 2011 for path in submatches(subs):
2012 2012 if c.p1().substate.get(path) != c.substate.get(path):
2013 2013 return True
2014 2014
2015 2015 if s.removed:
2016 2016 return any(submatches(c.p1().substate.keys()))
2017 2017
2018 2018 return False
2019 2019
2020 2020 return subset.filter(matches)
2021 2021
2022 2022 def _substringmatcher(pattern):
2023 2023 kind, pattern, matcher = util.stringmatcher(pattern)
2024 2024 if kind == 'literal':
2025 2025 matcher = lambda s: pattern in s
2026 2026 return kind, pattern, matcher
2027 2027
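# Illustrative sketch, not part of the original source: _substringmatcher()
# above turns plain strings into substring tests while leaving 're:' patterns
# to util.stringmatcher's regex handling; the sample strings are arbitrary.
def _example_substringmatcher():
    kind, pattern, m = _substringmatcher('bob')
    plain = m('bob the builder')     # True: plain strings match as substrings
    kind, pattern, m = _substringmatcher('re:^bob$')
    anchored = m('bob the builder')  # falsy: the anchored regex does not match
    return bool(plain), bool(anchored)
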
2028 2028 @predicate('tag([name])', safe=True)
2029 2029 def tag(repo, subset, x):
2030 2030 """The specified tag by name, or all tagged revisions if no name is given.
2031 2031
2032 2032 If `name` starts with `re:`, the remainder of the name is treated as
2033 2033 a regular expression. To match a tag that actually starts with `re:`,
2034 2034 use the prefix `literal:`.
2035 2035 """
2036 2036 # i18n: "tag" is a keyword
2037 2037 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
2038 2038 cl = repo.changelog
2039 2039 if args:
2040 2040 pattern = getstring(args[0],
2041 2041 # i18n: "tag" is a keyword
2042 2042 _('the argument to tag must be a string'))
2043 2043 kind, pattern, matcher = util.stringmatcher(pattern)
2044 2044 if kind == 'literal':
2045 2045 # avoid resolving all tags
2046 2046 tn = repo._tagscache.tags.get(pattern, None)
2047 2047 if tn is None:
2048 2048 raise error.RepoLookupError(_("tag '%s' does not exist")
2049 2049 % pattern)
2050 2050 s = set([repo[tn].rev()])
2051 2051 else:
2052 2052 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
2053 2053 else:
2054 2054 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
2055 2055 return subset & s
2056 2056
2057 2057 @predicate('tagged', safe=True)
2058 2058 def tagged(repo, subset, x):
2059 2059 return tag(repo, subset, x)
2060 2060
2061 2061 @predicate('unstable()', safe=True)
2062 2062 def unstable(repo, subset, x):
2063 2063 """Non-obsolete changesets with obsolete ancestors.
2064 2064 """
2065 2065 # i18n: "unstable" is a keyword
2066 2066 getargs(x, 0, 0, _("unstable takes no arguments"))
2067 2067 unstables = obsmod.getrevs(repo, 'unstable')
2068 2068 return subset & unstables
2069 2069
2070 2070
2071 2071 @predicate('user(string)', safe=True)
2072 2072 def user(repo, subset, x):
2073 2073 """User name contains string. The match is case-insensitive.
2074 2074
2075 2075 If `string` starts with `re:`, the remainder of the string is treated as
2076 2076 a regular expression. To match a user that actually contains `re:`, use
2077 2077 the prefix `literal:`.
2078 2078 """
2079 2079 return author(repo, subset, x)
2080 2080
2081 2081 # experimental
2082 2082 @predicate('wdir', safe=True)
2083 2083 def wdir(repo, subset, x):
2084 2084 # i18n: "wdir" is a keyword
2085 2085 getargs(x, 0, 0, _("wdir takes no arguments"))
2086 2086 if node.wdirrev in subset or isinstance(subset, fullreposet):
2087 2087 return baseset([node.wdirrev])
2088 2088 return baseset()
2089 2089
2090 2090 # for internal use
2091 2091 @predicate('_list', safe=True)
2092 2092 def _list(repo, subset, x):
2093 2093 s = getstring(x, "internal error")
2094 2094 if not s:
2095 2095 return baseset()
2096 2096 # remove duplicates here. it's difficult for the caller to deduplicate sets
2097 2097 # because different symbols can point to the same rev.
2098 2098 cl = repo.changelog
2099 2099 ls = []
2100 2100 seen = set()
2101 2101 for t in s.split('\0'):
2102 2102 try:
2103 2103 # fast path for integer revision
2104 2104 r = int(t)
2105 2105 if str(r) != t or r not in cl:
2106 2106 raise ValueError
2107 2107 revs = [r]
2108 2108 except ValueError:
2109 2109 revs = stringset(repo, subset, t)
2110 2110
2111 2111 for r in revs:
2112 2112 if r in seen:
2113 2113 continue
2114 2114 if (r in subset
2115 2115 or r == node.nullrev and isinstance(subset, fullreposet)):
2116 2116 ls.append(r)
2117 2117 seen.add(r)
2118 2118 return baseset(ls)
2119 2119
2120 2120 # for internal use
2121 2121 @predicate('_intlist', safe=True)
2122 2122 def _intlist(repo, subset, x):
2123 2123 s = getstring(x, "internal error")
2124 2124 if not s:
2125 2125 return baseset()
2126 2126 ls = [int(r) for r in s.split('\0')]
2127 2127 s = subset
2128 2128 return baseset([r for r in ls if r in s])
2129 2129
2130 2130 # for internal use
2131 2131 @predicate('_hexlist', safe=True)
2132 2132 def _hexlist(repo, subset, x):
2133 2133 s = getstring(x, "internal error")
2134 2134 if not s:
2135 2135 return baseset()
2136 2136 cl = repo.changelog
2137 2137 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2138 2138 s = subset
2139 2139 return baseset([r for r in ls if r in s])
2140 2140
2141 2141 methods = {
2142 2142 "range": rangeset,
2143 2143 "dagrange": dagrange,
2144 2144 "string": stringset,
2145 2145 "symbol": stringset,
2146 2146 "and": andset,
2147 2147 "or": orset,
2148 2148 "not": notset,
2149 2149 "difference": differenceset,
2150 2150 "list": listset,
2151 2151 "keyvalue": keyvaluepair,
2152 2152 "func": func,
2153 2153 "ancestor": ancestorspec,
2154 2154 "parent": parentspec,
2155 2155 "parentpost": p1,
2156 2156 }
2157 2157
2158 2158 def optimize(x, small):
2159 2159 if x is None:
2160 2160 return 0, x
2161 2161
2162 2162 smallbonus = 1
2163 2163 if small:
2164 2164 smallbonus = .5
2165 2165
2166 2166 op = x[0]
2167 2167 if op == 'minus':
2168 2168 return optimize(('and', x[1], ('not', x[2])), small)
2169 2169 elif op == 'only':
2170 2170 return optimize(('func', ('symbol', 'only'),
2171 2171 ('list', x[1], x[2])), small)
2172 2172 elif op == 'onlypost':
2173 2173 return optimize(('func', ('symbol', 'only'), x[1]), small)
2174 2174 elif op == 'dagrangepre':
2175 2175 return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
2176 2176 elif op == 'dagrangepost':
2177 2177 return optimize(('func', ('symbol', 'descendants'), x[1]), small)
2178 2178 elif op == 'rangeall':
2179 2179 return optimize(('range', ('string', '0'), ('string', 'tip')), small)
2180 2180 elif op == 'rangepre':
2181 2181 return optimize(('range', ('string', '0'), x[1]), small)
2182 2182 elif op == 'rangepost':
2183 2183 return optimize(('range', x[1], ('string', 'tip')), small)
2184 2184 elif op == 'negate':
2185 2185 return optimize(('string',
2186 2186 '-' + getstring(x[1], _("can't negate that"))), small)
2187 2187 elif op in 'string symbol negate':
2188 2188 return smallbonus, x # single revisions are small
2189 2189 elif op == 'and':
2190 2190 wa, ta = optimize(x[1], True)
2191 2191 wb, tb = optimize(x[2], True)
2192 2192
2193 2193 # (::x and not ::y)/(not ::y and ::x) have a fast path
2194 2194 def isonly(revs, bases):
2195 2195 return (
2196 2196 revs is not None
2197 2197 and revs[0] == 'func'
2198 2198 and getstring(revs[1], _('not a symbol')) == 'ancestors'
2199 2199 and bases is not None
2200 2200 and bases[0] == 'not'
2201 2201 and bases[1][0] == 'func'
2202 2202 and getstring(bases[1][1], _('not a symbol')) == 'ancestors')
2203 2203
2204 2204 w = min(wa, wb)
2205 2205 if isonly(ta, tb):
2206 2206 return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
2207 2207 if isonly(tb, ta):
2208 2208 return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))
2209 2209
2210 2210 if tb is not None and tb[0] == 'not':
2211 2211 return wa, ('difference', ta, tb[1])
2212 2212
2213 2213 if wa > wb:
2214 2214 return w, (op, tb, ta)
2215 2215 return w, (op, ta, tb)
2216 2216 elif op == 'or':
2217 2217 # fast path for machine-generated expressions, which are likely to have
2218 2218 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
2219 2219 ws, ts, ss = [], [], []
2220 2220 def flushss():
2221 2221 if not ss:
2222 2222 return
2223 2223 if len(ss) == 1:
2224 2224 w, t = ss[0]
2225 2225 else:
2226 2226 s = '\0'.join(t[1] for w, t in ss)
2227 2227 y = ('func', ('symbol', '_list'), ('string', s))
2228 2228 w, t = optimize(y, False)
2229 2229 ws.append(w)
2230 2230 ts.append(t)
2231 2231 del ss[:]
2232 2232 for y in x[1:]:
2233 2233 w, t = optimize(y, False)
2234 2234 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
2235 2235 ss.append((w, t))
2236 2236 continue
2237 2237 flushss()
2238 2238 ws.append(w)
2239 2239 ts.append(t)
2240 2240 flushss()
2241 2241 if len(ts) == 1:
2242 2242 return ws[0], ts[0] # 'or' operation is fully optimized out
2243 2243 # we can't reorder trees by weight because it would change the order.
2244 2244 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2245 2245 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2246 2246 return max(ws), (op,) + tuple(ts)
2247 2247 elif op == 'not':
2248 2248 # Optimize not public() to _notpublic() because we have a fast version
2249 2249 if x[1] == ('func', ('symbol', 'public'), None):
2250 2250 newsym = ('func', ('symbol', '_notpublic'), None)
2251 2251 o = optimize(newsym, not small)
2252 2252 return o[0], o[1]
2253 2253 else:
2254 2254 o = optimize(x[1], not small)
2255 2255 return o[0], (op, o[1])
2256 2256 elif op == 'parentpost':
2257 2257 o = optimize(x[1], small)
2258 2258 return o[0], (op, o[1])
2259 2259 elif op == 'group':
2260 2260 return optimize(x[1], small)
2261 2261 elif op in 'dagrange range parent ancestorspec':
2262 2262 if op == 'parent':
2263 2263 # x^:y means (x^) : y, not x ^ (:y)
2264 2264 post = ('parentpost', x[1])
2265 2265 if x[2][0] == 'dagrangepre':
2266 2266 return optimize(('dagrange', post, x[2][1]), small)
2267 2267 elif x[2][0] == 'rangepre':
2268 2268 return optimize(('range', post, x[2][1]), small)
2269 2269
2270 2270 wa, ta = optimize(x[1], small)
2271 2271 wb, tb = optimize(x[2], small)
2272 2272 return wa + wb, (op, ta, tb)
2273 2273 elif op == 'list':
2274 2274 ws, ts = zip(*(optimize(y, small) for y in x[1:]))
2275 2275 return sum(ws), (op,) + ts
2276 2276 elif op == 'func':
2277 2277 f = getstring(x[1], _("not a symbol"))
2278 2278 wa, ta = optimize(x[2], small)
2279 2279 if f in ("author branch closed date desc file grep keyword "
2280 2280 "outgoing user"):
2281 2281 w = 10 # slow
2282 2282 elif f in "modifies adds removes":
2283 2283 w = 30 # slower
2284 2284 elif f == "contains":
2285 2285 w = 100 # very slow
2286 2286 elif f == "ancestor":
2287 2287 w = 1 * smallbonus
2288 2288 elif f in "reverse limit first _intlist":
2289 2289 w = 0
2290 2290 elif f in "sort":
2291 2291 w = 10 # assume most sorts look at changelog
2292 2292 else:
2293 2293 w = 1
2294 2294 return w + wa, (op, x[1], ta)
2295 2295 return 1, x
2296 2296
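# Illustrative sketch, not part of the original source: what the 'or' fast
# path in optimize() above does to a machine-generated expression. Only the
# resulting tree is interesting here; the weight is an implementation detail.
def _example_optimize_or():
    weight, tree = optimize(parse('1 + 2 + head()'), False)
    # the trivial revisions '1' and '2' are folded into a single
    # _list('1\x002') call, while head() remains a separate 'or' branch
    return prettyformat(tree)
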
2297 2297 _aliasarg = ('func', ('symbol', '_aliasarg'))
2298 2298 def _getaliasarg(tree):
2299 2299 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
2300 2300 return X, None otherwise.
2301 2301 """
2302 2302 if (len(tree) == 3 and tree[:2] == _aliasarg
2303 2303 and tree[2][0] == 'string'):
2304 2304 return tree[2][1]
2305 2305 return None
2306 2306
2307 2307 def _checkaliasarg(tree, known=None):
2308 2308 """Check tree contains no _aliasarg construct or only ones which
2309 2309 value is in known. Used to avoid alias placeholders injection.
2310 2310 """
2311 2311 if isinstance(tree, tuple):
2312 2312 arg = _getaliasarg(tree)
2313 2313 if arg is not None and (not known or arg not in known):
2314 2314 raise error.UnknownIdentifier('_aliasarg', [])
2315 2315 for t in tree:
2316 2316 _checkaliasarg(t, known)
2317 2317
2318 2318 # the set of valid characters for the initial letter of symbols in
2319 2319 # alias declarations and definitions
2320 2320 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2321 2321 if c.isalnum() or c in '._@$' or ord(c) > 127)
2322 2322
2323 2323 def _tokenizealias(program, lookup=None):
2324 2324 """Parse alias declaration/definition into a stream of tokens
2325 2325
2326 2326 This also allows symbol names to use ``$`` as an initial letter
2327 2327 (for backward compatibility), so callers of this function should
2328 2328 examine whether ``$`` is also used for unexpected symbols.
2329 2329 """
2330 2330 return tokenize(program, lookup=lookup,
2331 2331 syminitletters=_aliassyminitletters)
2332 2332
2333 2333 def _parsealiasdecl(decl):
2334 2334 """Parse alias declaration ``decl``
2335 2335
2336 2336 This returns ``(name, tree, args, errorstr)`` tuple:
2337 2337
2338 2338 - ``name``: of declared alias (may be ``decl`` itself at error)
2339 2339 - ``tree``: parse result (or ``None`` at error)
2340 2340 - ``args``: list of alias argument names (or None for symbol declaration)
2341 2341 - ``errorstr``: detail about detected error (or None)
2342 2342
2343 2343 >>> _parsealiasdecl('foo')
2344 2344 ('foo', ('symbol', 'foo'), None, None)
2345 2345 >>> _parsealiasdecl('$foo')
2346 2346 ('$foo', None, None, "'$' not for alias arguments")
2347 2347 >>> _parsealiasdecl('foo::bar')
2348 2348 ('foo::bar', None, None, 'invalid format')
2349 2349 >>> _parsealiasdecl('foo bar')
2350 2350 ('foo bar', None, None, 'at 4: invalid token')
2351 2351 >>> _parsealiasdecl('foo()')
2352 2352 ('foo', ('func', ('symbol', 'foo')), [], None)
2353 2353 >>> _parsealiasdecl('$foo()')
2354 2354 ('$foo()', None, None, "'$' not for alias arguments")
2355 2355 >>> _parsealiasdecl('foo($1, $2)')
2356 2356 ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
2357 2357 >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
2358 2358 ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
2359 2359 >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
2360 2360 ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
2361 2361 >>> _parsealiasdecl('foo(bar($1, $2))')
2362 2362 ('foo(bar($1, $2))', None, None, 'invalid argument list')
2363 2363 >>> _parsealiasdecl('foo("string")')
2364 2364 ('foo("string")', None, None, 'invalid argument list')
2365 2365 >>> _parsealiasdecl('foo($1, $2')
2366 2366 ('foo($1, $2', None, None, 'at 10: unexpected token: end')
2367 2367 >>> _parsealiasdecl('foo("string')
2368 2368 ('foo("string', None, None, 'at 5: unterminated string')
2369 2369 >>> _parsealiasdecl('foo($1, $2, $1)')
2370 2370 ('foo', None, None, 'argument names collide with each other')
2371 2371 """
2372 2372 p = parser.parser(elements)
2373 2373 try:
2374 2374 tree, pos = p.parse(_tokenizealias(decl))
2375 2375 if (pos != len(decl)):
2376 2376 raise error.ParseError(_('invalid token'), pos)
2377 2377 tree = parser.simplifyinfixops(tree, ('list',))
2378 2378
2379 2379 if isvalidsymbol(tree):
2380 2380 # "name = ...." style
2381 2381 name = getsymbol(tree)
2382 2382 if name.startswith('$'):
2383 2383 return (decl, None, None, _("'$' not for alias arguments"))
2384 2384 return (name, ('symbol', name), None, None)
2385 2385
2386 2386 if isvalidfunc(tree):
2387 2387 # "name(arg, ....) = ...." style
2388 2388 name = getfuncname(tree)
2389 2389 if name.startswith('$'):
2390 2390 return (decl, None, None, _("'$' not for alias arguments"))
2391 2391 args = []
2392 2392 for arg in getfuncargs(tree):
2393 2393 if not isvalidsymbol(arg):
2394 2394 return (decl, None, None, _("invalid argument list"))
2395 2395 args.append(getsymbol(arg))
2396 2396 if len(args) != len(set(args)):
2397 2397 return (name, None, None,
2398 2398 _("argument names collide with each other"))
2399 2399 return (name, ('func', ('symbol', name)), args, None)
2400 2400
2401 2401 return (decl, None, None, _("invalid format"))
2402 2402 except error.ParseError as inst:
2403 2403 return (decl, None, None, parseerrordetail(inst))
2404 2404
2405 2405 def _parsealiasdefn(defn, args):
2406 2406 """Parse alias definition ``defn``
2407 2407
2408 2408 This function also replaces alias argument references in the
2409 2409 specified definition by ``_aliasarg(ARGNAME)``.
2410 2410
2411 2411 ``args`` is a list of alias argument names, or None if the alias
2412 2412 is declared as a symbol.
2413 2413
2414 2414 This returns "tree" as parsing result.
2415 2415
2416 2416 >>> args = ['$1', '$2', 'foo']
2417 2417 >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
2418 2418 (or
2419 2419 (func
2420 2420 ('symbol', '_aliasarg')
2421 2421 ('string', '$1'))
2422 2422 (func
2423 2423 ('symbol', '_aliasarg')
2424 2424 ('string', 'foo')))
2425 2425 >>> try:
2426 2426 ... _parsealiasdefn('$1 or $bar', args)
2427 2427 ... except error.ParseError, inst:
2428 2428 ... print parseerrordetail(inst)
2429 2429 at 6: '$' not for alias arguments
2430 2430 >>> args = ['$1', '$10', 'foo']
2431 2431 >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
2432 2432 (or
2433 2433 (func
2434 2434 ('symbol', '_aliasarg')
2435 2435 ('string', '$10'))
2436 2436 ('symbol', 'foobar'))
2437 2437 >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
2438 2438 (or
2439 2439 ('string', '$1')
2440 2440 ('string', 'foo'))
2441 2441 """
2442 2442 def tokenizedefn(program, lookup=None):
2443 2443 if args:
2444 2444 argset = set(args)
2445 2445 else:
2446 2446 argset = set()
2447 2447
2448 2448 for t, value, pos in _tokenizealias(program, lookup=lookup):
2449 2449 if t == 'symbol':
2450 2450 if value in argset:
2451 2451 # emulate tokenization of "_aliasarg('ARGNAME')":
2452 2452 # "_aliasarg()" is an unknown symbol only used separate
2453 2453 # alias argument placeholders from regular strings.
2454 2454 yield ('symbol', '_aliasarg', pos)
2455 2455 yield ('(', None, pos)
2456 2456 yield ('string', value, pos)
2457 2457 yield (')', None, pos)
2458 2458 continue
2459 2459 elif value.startswith('$'):
2460 2460 raise error.ParseError(_("'$' not for alias arguments"),
2461 2461 pos)
2462 2462 yield (t, value, pos)
2463 2463
2464 2464 p = parser.parser(elements)
2465 2465 tree, pos = p.parse(tokenizedefn(defn))
2466 2466 if pos != len(defn):
2467 2467 raise error.ParseError(_('invalid token'), pos)
2468 2468 return parser.simplifyinfixops(tree, ('list', 'or'))
2469 2469
2470 2470 class revsetalias(object):
2471 2471 # whether its own `error` information has already been shown or not.
2472 2472 # this avoids showing the same warning multiple times at each `findaliases`.
2473 2473 warned = False
2474 2474
2475 2475 def __init__(self, name, value):
2476 2476 '''Aliases like:
2477 2477
2478 2478 h = heads(default)
2479 2479 b($1) = ancestors($1) - ancestors(default)
2480 2480 '''
2481 2481 self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
2482 2482 if self.error:
2483 2483 self.error = _('failed to parse the declaration of revset alias'
2484 2484 ' "%s": %s') % (self.name, self.error)
2485 2485 return
2486 2486
2487 2487 try:
2488 2488 self.replacement = _parsealiasdefn(value, self.args)
2489 2489 # Check for placeholder injection
2490 2490 _checkaliasarg(self.replacement, self.args)
2491 2491 except error.ParseError as inst:
2492 2492 self.error = _('failed to parse the definition of revset alias'
2493 2493 ' "%s": %s') % (self.name, parseerrordetail(inst))
2494 2494
2495 2495 def _getalias(aliases, tree):
2496 2496 """If tree looks like an unexpanded alias, return it. Return None
2497 2497 otherwise.
2498 2498 """
2499 2499 if isinstance(tree, tuple) and tree:
2500 2500 if tree[0] == 'symbol' and len(tree) == 2:
2501 2501 name = tree[1]
2502 2502 alias = aliases.get(name)
2503 2503 if alias and alias.args is None and alias.tree == tree:
2504 2504 return alias
2505 2505 if tree[0] == 'func' and len(tree) > 1:
2506 2506 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2507 2507 name = tree[1][1]
2508 2508 alias = aliases.get(name)
2509 2509 if alias and alias.args is not None and alias.tree == tree[:2]:
2510 2510 return alias
2511 2511 return None
2512 2512
2513 2513 def _expandargs(tree, args):
2514 2514 """Replace _aliasarg instances with the substitution value of the
2515 2515 same name in args, recursively.
2516 2516 """
2517 2517 if not tree or not isinstance(tree, tuple):
2518 2518 return tree
2519 2519 arg = _getaliasarg(tree)
2520 2520 if arg is not None:
2521 2521 return args[arg]
2522 2522 return tuple(_expandargs(t, args) for t in tree)
2523 2523
2524 2524 def _expandaliases(aliases, tree, expanding, cache):
2525 2525 """Expand aliases in tree, recursively.
2526 2526
2527 2527 'aliases' is a dictionary mapping user defined aliases to
2528 2528 revsetalias objects.
2529 2529 """
2530 2530 if not isinstance(tree, tuple):
2531 2531 # Do not expand raw strings
2532 2532 return tree
2533 2533 alias = _getalias(aliases, tree)
2534 2534 if alias is not None:
2535 2535 if alias.error:
2536 2536 raise error.Abort(alias.error)
2537 2537 if alias in expanding:
2538 2538 raise error.ParseError(_('infinite expansion of revset alias "%s" '
2539 2539 'detected') % alias.name)
2540 2540 expanding.append(alias)
2541 2541 if alias.name not in cache:
2542 2542 cache[alias.name] = _expandaliases(aliases, alias.replacement,
2543 2543 expanding, cache)
2544 2544 result = cache[alias.name]
2545 2545 expanding.pop()
2546 2546 if alias.args is not None:
2547 2547 l = getlist(tree[2])
2548 2548 if len(l) != len(alias.args):
2549 2549 raise error.ParseError(
2550 2550 _('invalid number of arguments: %d') % len(l))
2551 2551 l = [_expandaliases(aliases, a, [], cache) for a in l]
2552 2552 result = _expandargs(result, dict(zip(alias.args, l)))
2553 2553 else:
2554 2554 result = tuple(_expandaliases(aliases, t, expanding, cache)
2555 2555 for t in tree)
2556 2556 return result
2557 2557
2558 2558 def findaliases(ui, tree, showwarning=None):
2559 2559 _checkaliasarg(tree)
2560 2560 aliases = {}
2561 2561 for k, v in ui.configitems('revsetalias'):
2562 2562 alias = revsetalias(k, v)
2563 2563 aliases[alias.name] = alias
2564 2564 tree = _expandaliases(aliases, tree, [], {})
2565 2565 if showwarning:
2566 2566 # warn about problematic (but unreferenced) aliases
2567 2567 for name, alias in sorted(aliases.iteritems()):
2568 2568 if alias.error and not alias.warned:
2569 2569 showwarning(_('warning: %s\n') % (alias.error))
2570 2570 alias.warned = True
2571 2571 return tree
2572 2572
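# Illustrative sketch, not part of the original source: expanding user aliases
# on a parsed tree the way match() below does. It assumes a ui object whose
# [revsetalias] section defines 'h', e.g. 'h = heads(default)' as in the
# revsetalias docstring above.
def _example_findaliases(ui):
    tree = parse('h and not merge()')
    # the 'h' symbol is replaced by the parsed definition of the alias;
    # broken aliases are reported through 'showwarning' instead of aborting
    return findaliases(ui, tree, showwarning=ui.warn)
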
2573 2573 def foldconcat(tree):
2574 2574 """Fold elements to be concatenated by `##`
2575 2575 """
2576 2576 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2577 2577 return tree
2578 2578 if tree[0] == '_concat':
2579 2579 pending = [tree]
2580 2580 l = []
2581 2581 while pending:
2582 2582 e = pending.pop()
2583 2583 if e[0] == '_concat':
2584 2584 pending.extend(reversed(e[1:]))
2585 2585 elif e[0] in ('string', 'symbol'):
2586 2586 l.append(e[1])
2587 2587 else:
2588 2588 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2589 2589 raise error.ParseError(msg)
2590 2590 return ('string', ''.join(l))
2591 2591 else:
2592 2592 return tuple(foldconcat(t) for t in tree)
2593 2593
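# Illustrative sketch, not part of the original source: foldconcat() above
# collapses the '##' concatenation operator into a single string node.
def _example_foldconcat():
    # "'a' ## 'b'" parses into a ('_concat', ...) node, which folds into
    # a single ('string', 'ab') node
    return foldconcat(parse("'a' ## 'b'"))
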
2594 2594 def parse(spec, lookup=None):
2595 2595 p = parser.parser(elements)
2596 2596 tree, pos = p.parse(tokenize(spec, lookup=lookup))
2597 2597 if pos != len(spec):
2598 2598 raise error.ParseError(_("invalid token"), pos)
2599 2599 return parser.simplifyinfixops(tree, ('list', 'or'))
2600 2600
2601 2601 def posttreebuilthook(tree, repo):
2602 2602 # hook for extensions to execute code on the optimized tree
2603 2603 pass
2604 2604
2605 2605 def match(ui, spec, repo=None):
2606 2606 if not spec:
2607 2607 raise error.ParseError(_("empty query"))
2608 2608 lookup = None
2609 2609 if repo:
2610 2610 lookup = repo.__contains__
2611 2611 tree = parse(spec, lookup)
2612 2612 return _makematcher(ui, tree, repo)
2613 2613
2614 2614 def matchany(ui, specs, repo=None):
2615 2615 """Create a matcher that will include any revisions matching one of the
2616 2616 given specs"""
2617 2617 if not specs:
2618 2618 def mfunc(repo, subset=None):
2619 2619 return baseset()
2620 2620 return mfunc
2621 2621 if not all(specs):
2622 2622 raise error.ParseError(_("empty query"))
2623 2623 lookup = None
2624 2624 if repo:
2625 2625 lookup = repo.__contains__
2626 2626 if len(specs) == 1:
2627 2627 tree = parse(specs[0], lookup)
2628 2628 else:
2629 2629 tree = ('or',) + tuple(parse(s, lookup) for s in specs)
2630 2630 return _makematcher(ui, tree, repo)
2631 2631
2632 2632 def _makematcher(ui, tree, repo):
2633 2633 if ui:
2634 2634 tree = findaliases(ui, tree, showwarning=ui.warn)
2635 2635 tree = foldconcat(tree)
2636 2636 weight, tree = optimize(tree, True)
2637 2637 posttreebuilthook(tree, repo)
2638 2638 def mfunc(repo, subset=None):
2639 2639 if subset is None:
2640 2640 subset = fullreposet(repo)
2641 2641 if util.safehasattr(subset, 'isascending'):
2642 2642 result = getset(repo, subset, tree)
2643 2643 else:
2644 2644 result = getset(repo, baseset(subset), tree)
2645 2645 return result
2646 2646 return mfunc
2647 2647
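# Illustrative sketch, not part of the original source: typical use of match()
# and matchany() above from extension code; 'repo' and the spec strings are
# assumptions of the example.
def _example_match_usage(repo):
    mfunc = match(repo.ui, 'draft() and not merge()', repo)
    drafts = mfunc(repo)  # a smartset evaluated over the whole repo
    either = matchany(repo.ui, ['head()', 'bookmark()'], repo)(repo)
    return drafts, either
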
2648 2648 def formatspec(expr, *args):
2649 2649 '''
2650 2650 This is a convenience function for using revsets internally, and
2651 2651 escapes arguments appropriately. Aliases are intentionally ignored
2652 2652 so that intended expression behavior isn't accidentally subverted.
2653 2653
2654 2654 Supported arguments:
2655 2655
2656 2656 %r = revset expression, parenthesized
2657 2657 %d = int(arg), no quoting
2658 2658 %s = string(arg), escaped and single-quoted
2659 2659 %b = arg.branch(), escaped and single-quoted
2660 2660 %n = hex(arg), single-quoted
2661 2661 %% = a literal '%'
2662 2662
2663 2663 Prefixing the type with 'l' specifies a parenthesized list of that type.
2664 2664
2665 2665 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2666 2666 '(10 or 11):: and ((this()) or (that()))'
2667 2667 >>> formatspec('%d:: and not %d::', 10, 20)
2668 2668 '10:: and not 20::'
2669 2669 >>> formatspec('%ld or %ld', [], [1])
2670 2670 "_list('') or 1"
2671 2671 >>> formatspec('keyword(%s)', 'foo\\xe9')
2672 2672 "keyword('foo\\\\xe9')"
2673 2673 >>> b = lambda: 'default'
2674 2674 >>> b.branch = b
2675 2675 >>> formatspec('branch(%b)', b)
2676 2676 "branch('default')"
2677 2677 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2678 2678 "root(_list('a\\x00b\\x00c\\x00d'))"
2679 2679 '''
2680 2680
2681 2681 def quote(s):
2682 2682 return repr(str(s))
2683 2683
2684 2684 def argtype(c, arg):
2685 2685 if c == 'd':
2686 2686 return str(int(arg))
2687 2687 elif c == 's':
2688 2688 return quote(arg)
2689 2689 elif c == 'r':
2690 2690 parse(arg) # make sure syntax errors are confined
2691 2691 return '(%s)' % arg
2692 2692 elif c == 'n':
2693 2693 return quote(node.hex(arg))
2694 2694 elif c == 'b':
2695 2695 return quote(arg.branch())
2696 2696
2697 2697 def listexp(s, t):
2698 2698 l = len(s)
2699 2699 if l == 0:
2700 2700 return "_list('')"
2701 2701 elif l == 1:
2702 2702 return argtype(t, s[0])
2703 2703 elif t == 'd':
2704 2704 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2705 2705 elif t == 's':
2706 2706 return "_list('%s')" % "\0".join(s)
2707 2707 elif t == 'n':
2708 2708 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2709 2709 elif t == 'b':
2710 2710 return "_list('%s')" % "\0".join(a.branch() for a in s)
2711 2711
2712 2712 m = l // 2
2713 2713 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2714 2714
2715 2715 ret = ''
2716 2716 pos = 0
2717 2717 arg = 0
2718 2718 while pos < len(expr):
2719 2719 c = expr[pos]
2720 2720 if c == '%':
2721 2721 pos += 1
2722 2722 d = expr[pos]
2723 2723 if d == '%':
2724 2724 ret += d
2725 2725 elif d in 'dsnbr':
2726 2726 ret += argtype(d, args[arg])
2727 2727 arg += 1
2728 2728 elif d == 'l':
2729 2729 # a list of some type
2730 2730 pos += 1
2731 2731 d = expr[pos]
2732 2732 ret += listexp(list(args[arg]), d)
2733 2733 arg += 1
2734 2734 else:
2735 2735 raise error.Abort('unexpected revspec format character %s' % d)
2736 2736 else:
2737 2737 ret += c
2738 2738 pos += 1
2739 2739
2740 2740 return ret
2741 2741
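# Illustrative sketch, not part of the original source: callers rarely use
# formatspec() directly; localrepository.revs() forwards its extra arguments
# through formatspec(), so untrusted values can be embedded safely. 'text'
# and 'candidates' are example values.
def _example_formatspec_usage(repo, text, candidates):
    # %s is escaped and quoted; %ld expands a list of revision numbers
    return repo.revs('keyword(%s) and %ld', text, candidates)
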
2742 2742 def prettyformat(tree):
2743 2743 return parser.prettyformat(tree, ('string', 'symbol'))
2744 2744
2745 2745 def depth(tree):
2746 2746 if isinstance(tree, tuple):
2747 2747 return max(map(depth, tree)) + 1
2748 2748 else:
2749 2749 return 0
2750 2750
2751 2751 def funcsused(tree):
2752 2752 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2753 2753 return set()
2754 2754 else:
2755 2755 funcs = set()
2756 2756 for s in tree[1:]:
2757 2757 funcs |= funcsused(s)
2758 2758 if tree[0] == 'func':
2759 2759 funcs.add(tree[1][1])
2760 2760 return funcs
2761 2761
2762 2762 class abstractsmartset(object):
2763 2763
2764 2764 def __nonzero__(self):
2765 2765 """True if the smartset is not empty"""
2766 2766 raise NotImplementedError()
2767 2767
2768 2768 def __contains__(self, rev):
2769 2769 """provide fast membership testing"""
2770 2770 raise NotImplementedError()
2771 2771
2772 2772 def __iter__(self):
2773 2773 """iterate the set in the order it is supposed to be iterated"""
2774 2774 raise NotImplementedError()
2775 2775
2776 2776 # Attributes containing a function to perform a fast iteration in a given
2777 2777 # direction. A smartset can have none, one, or both defined.
2778 2778 #
2779 2779 # Default value is None instead of a function returning None to avoid
2780 2780 # initializing an iterator just for testing if a fast method exists.
2781 2781 fastasc = None
2782 2782 fastdesc = None
2783 2783
2784 2784 def isascending(self):
2785 2785 """True if the set will iterate in ascending order"""
2786 2786 raise NotImplementedError()
2787 2787
2788 2788 def isdescending(self):
2789 2789 """True if the set will iterate in descending order"""
2790 2790 raise NotImplementedError()
2791 2791
2792 2792 @util.cachefunc
2793 2793 def min(self):
2794 2794 """return the minimum element in the set"""
2795 2795 if self.fastasc is not None:
2796 2796 for r in self.fastasc():
2797 2797 return r
2798 2798 raise ValueError('arg is an empty sequence')
2799 2799 return min(self)
2800 2800
2801 2801 @util.cachefunc
2802 2802 def max(self):
2803 2803 """return the maximum element in the set"""
2804 2804 if self.fastdesc is not None:
2805 2805 for r in self.fastdesc():
2806 2806 return r
2807 2807 raise ValueError('arg is an empty sequence')
2808 2808 return max(self)
2809 2809
2810 2810 def first(self):
2811 2811 """return the first element in the set (user iteration perspective)
2812 2812
2813 2813 Return None if the set is empty"""
2814 2814 raise NotImplementedError()
2815 2815
2816 2816 def last(self):
2817 2817 """return the last element in the set (user iteration perspective)
2818 2818
2819 2819 Return None if the set is empty"""
2820 2820 raise NotImplementedError()
2821 2821
2822 2822 def __len__(self):
2823 2823 """return the length of the smartsets
2824 2824
2825 2825 This can be expensive on smartset that could be lazy otherwise."""
2826 2826 raise NotImplementedError()
2827 2827
2828 2828 def reverse(self):
2829 2829 """reverse the expected iteration order"""
2830 2830 raise NotImplementedError()
2831 2831
2832 2832 def sort(self, reverse=True):
2833 2833 """get the set to iterate in an ascending or descending order"""
2834 2834 raise NotImplementedError()
2835 2835
2836 2836 def __and__(self, other):
2837 2837 """Returns a new object with the intersection of the two collections.
2838 2838
2839 2839 This is part of the mandatory API for smartset."""
2840 2840 if isinstance(other, fullreposet):
2841 2841 return self
2842 2842 return self.filter(other.__contains__, cache=False)
2843 2843
2844 2844 def __add__(self, other):
2845 2845 """Returns a new object with the union of the two collections.
2846 2846
2847 2847 This is part of the mandatory API for smartset."""
2848 2848 return addset(self, other)
2849 2849
2850 2850 def __sub__(self, other):
2851 2851 """Returns a new object with the substraction of the two collections.
2852 2852
2853 2853 This is part of the mandatory API for smartset."""
2854 2854 c = other.__contains__
2855 2855 return self.filter(lambda r: not c(r), cache=False)
2856 2856
2857 2857 def filter(self, condition, cache=True):
2858 2858 """Returns this smartset filtered by condition as a new smartset.
2859 2859
2860 2860 `condition` is a callable which takes a revision number and returns a
2861 2861 boolean.
2862 2862
2863 2863 This is part of the mandatory API for smartset."""
2864 2864 # builtins cannot be cached, but they do not need to be
2865 2865 if cache and util.safehasattr(condition, 'func_code'):
2866 2866 condition = util.cachefunc(condition)
2867 2867 return filteredset(self, condition)
2868 2868
2869 2869 class baseset(abstractsmartset):
2870 2870 """Basic data structure that represents a revset and contains the basic
2871 2871 operation that it should be able to perform.
2872 2872
2873 2873 Every method in this class should be implemented by any smartset class.
2874 2874 """
2875 2875 def __init__(self, data=()):
2876 2876 if not isinstance(data, list):
2877 2877 if isinstance(data, set):
2878 2878 self._set = data
2879 2879 data = list(data)
2880 2880 self._list = data
2881 2881 self._ascending = None
2882 2882
2883 2883 @util.propertycache
2884 2884 def _set(self):
2885 2885 return set(self._list)
2886 2886
2887 2887 @util.propertycache
2888 2888 def _asclist(self):
2889 2889 asclist = self._list[:]
2890 2890 asclist.sort()
2891 2891 return asclist
2892 2892
2893 2893 def __iter__(self):
2894 2894 if self._ascending is None:
2895 2895 return iter(self._list)
2896 2896 elif self._ascending:
2897 2897 return iter(self._asclist)
2898 2898 else:
2899 2899 return reversed(self._asclist)
2900 2900
2901 2901 def fastasc(self):
2902 2902 return iter(self._asclist)
2903 2903
2904 2904 def fastdesc(self):
2905 2905 return reversed(self._asclist)
2906 2906
2907 2907 @util.propertycache
2908 2908 def __contains__(self):
2909 2909 return self._set.__contains__
2910 2910
2911 2911 def __nonzero__(self):
2912 2912 return bool(self._list)
2913 2913
2914 2914 def sort(self, reverse=False):
2915 2915 self._ascending = not bool(reverse)
2916 2916
2917 2917 def reverse(self):
2918 2918 if self._ascending is None:
2919 2919 self._list.reverse()
2920 2920 else:
2921 2921 self._ascending = not self._ascending
2922 2922
2923 2923 def __len__(self):
2924 2924 return len(self._list)
2925 2925
2926 2926 def isascending(self):
2927 2927 """Returns True if the collection is ascending order, False if not.
2928 2928
2929 2929 This is part of the mandatory API for smartset."""
2930 2930 if len(self) <= 1:
2931 2931 return True
2932 2932 return self._ascending is not None and self._ascending
2933 2933
2934 2934 def isdescending(self):
2935 2935 """Returns True if the collection is descending order, False if not.
2936 2936
2937 2937 This is part of the mandatory API for smartset."""
2938 2938 if len(self) <= 1:
2939 2939 return True
2940 2940 return self._ascending is not None and not self._ascending
2941 2941
2942 2942 def first(self):
2943 2943 if self:
2944 2944 if self._ascending is None:
2945 2945 return self._list[0]
2946 2946 elif self._ascending:
2947 2947 return self._asclist[0]
2948 2948 else:
2949 2949 return self._asclist[-1]
2950 2950 return None
2951 2951
2952 2952 def last(self):
2953 2953 if self:
2954 2954 if self._ascending is None:
2955 2955 return self._list[-1]
2956 2956 elif self._ascending:
2957 2957 return self._asclist[-1]
2958 2958 else:
2959 2959 return self._asclist[0]
2960 2960 return None
2961 2961
2962 2962 def __repr__(self):
2963 2963 d = {None: '', False: '-', True: '+'}[self._ascending]
2964 2964 return '<%s%s %r>' % (type(self).__name__, d, self._list)
2965 2965
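# Illustrative sketch (not from this change): a baseset keeps insertion order
# until sort()/reverse() is called, after which the cached _asclist is used.
#
#   >>> s = baseset([3, 1, 2])
#   >>> list(s), s.first(), s.last()
#   ([3, 1, 2], 3, 2)
#   >>> s.sort()
#   >>> list(s)
#   [1, 2, 3]
#   >>> s.reverse()
#   >>> list(s)
#   [3, 2, 1]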
2966 2966 class filteredset(abstractsmartset):
2967 2967 """Duck type for baseset class which iterates lazily over the revisions in
2968 2968 the subset and contains a function which tests for membership in the
2969 2969 revset
2970 2970 """
2971 2971 def __init__(self, subset, condition=lambda x: True):
2972 2972 """
2973 2973 condition: a function that decides whether a revision in the subset
2974 2974 belongs to the revset or not.
2975 2975 """
2976 2976 self._subset = subset
2977 2977 self._condition = condition
2978 2978
2979 2979 def __contains__(self, x):
2980 2980 return x in self._subset and self._condition(x)
2981 2981
2982 2982 def __iter__(self):
2983 2983 return self._iterfilter(self._subset)
2984 2984
2985 2985 def _iterfilter(self, it):
2986 2986 cond = self._condition
2987 2987 for x in it:
2988 2988 if cond(x):
2989 2989 yield x
2990 2990
2991 2991 @property
2992 2992 def fastasc(self):
2993 2993 it = self._subset.fastasc
2994 2994 if it is None:
2995 2995 return None
2996 2996 return lambda: self._iterfilter(it())
2997 2997
2998 2998 @property
2999 2999 def fastdesc(self):
3000 3000 it = self._subset.fastdesc
3001 3001 if it is None:
3002 3002 return None
3003 3003 return lambda: self._iterfilter(it())
3004 3004
3005 3005 def __nonzero__(self):
3006 3006 fast = self.fastasc
3007 3007 if fast is None:
3008 3008 fast = self.fastdesc
3009 3009 if fast is not None:
3010 3010 it = fast()
3011 3011 else:
3012 3012 it = self
3013 3013
3014 3014 for r in it:
3015 3015 return True
3016 3016 return False
3017 3017
3018 3018 def __len__(self):
3019 3019 # Basic implementation to be changed in future patches.
3020 3020 l = baseset([r for r in self])
3021 3021 return len(l)
3022 3022
3023 3023 def sort(self, reverse=False):
3024 3024 self._subset.sort(reverse=reverse)
3025 3025
3026 3026 def reverse(self):
3027 3027 self._subset.reverse()
3028 3028
3029 3029 def isascending(self):
3030 3030 return self._subset.isascending()
3031 3031
3032 3032 def isdescending(self):
3033 3033 return self._subset.isdescending()
3034 3034
3035 3035 def first(self):
3036 3036 for x in self:
3037 3037 return x
3038 3038 return None
3039 3039
3040 3040 def last(self):
3041 3041 it = None
3042 3042 if self.isascending():
3043 3043 it = self.fastdesc
3044 3044 elif self.isdescending():
3045 3045 it = self.fastasc
3046 3046 if it is not None:
3047 3047 for x in it():
3048 3048 return x
3049 3049 return None #empty case
3050 3050 else:
3051 3051 x = None
3052 3052 for x in self:
3053 3053 pass
3054 3054 return x
3055 3055
3056 3056 def __repr__(self):
3057 3057 return '<%s %r>' % (type(self).__name__, self._subset)
3058 3058
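# Illustrative sketch (not from this change): membership in a filteredset
# checks both the wrapped subset and the condition, while iteration and
# len() stay lazy until actually requested.
#
#   >>> fs = filteredset(baseset([1, 2, 3, 4]), lambda r: r > 2)
#   >>> 2 in fs, 3 in fs, list(fs), len(fs)
#   (False, True, [3, 4], 2)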
3059 3059 def _iterordered(ascending, iter1, iter2):
3060 3060 """produce an ordered iteration from two iterators with the same order
3061 3061
3062 3062 The 'ascending' argument indicates the iteration direction.
3063 3063 """
3064 3064 choice = max
3065 3065 if ascending:
3066 3066 choice = min
3067 3067
3068 3068 val1 = None
3069 3069 val2 = None
3070 3070 try:
3071 3071 # Consume both iterators in an ordered way until one is empty
3072 3072 while True:
3073 3073 if val1 is None:
3074 3074 val1 = iter1.next()
3075 3075 if val2 is None:
3076 3076 val2 = iter2.next()
3077 3077 next = choice(val1, val2)
3078 3078 yield next
3079 3079 if val1 == next:
3080 3080 val1 = None
3081 3081 if val2 == next:
3082 3082 val2 = None
3083 3083 except StopIteration:
3084 3084 # Flush any remaining values and consume the other one
3085 3085 it = iter2
3086 3086 if val1 is not None:
3087 3087 yield val1
3088 3088 it = iter1
3089 3089 elif val2 is not None:
3090 3090 # might have been equality and both are empty
3091 3091 yield val2
3092 3092 for val in it:
3093 3093 yield val
3094 3094
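# Illustrative sketch (not from this change): merging two iterators that are
# already in the requested order yields each value exactly once, even when
# both sides contain it.
#
#   >>> list(_iterordered(True, iter([1, 3, 5]), iter([2, 3, 6])))
#   [1, 2, 3, 5, 6]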
3095 3095 class addset(abstractsmartset):
3096 3096 """Represent the addition of two sets
3097 3097
3098 3098 Wrapper structure for lazily adding two structures without losing much
3099 3099 performance on the __contains__ method
3100 3100
3101 3101 If the ascending attribute is set, that means the two structures are
3102 3102 ordered in either an ascending or descending way. Therefore, we can add
3103 3103 them maintaining the order by iterating over both at the same time
3104 3104
3105 3105 >>> xs = baseset([0, 3, 2])
3106 3106 >>> ys = baseset([5, 2, 4])
3107 3107
3108 3108 >>> rs = addset(xs, ys)
3109 3109 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3110 3110 (True, True, False, True, 0, 4)
3111 3111 >>> rs = addset(xs, baseset([]))
3112 3112 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3113 3113 (True, True, False, 0, 2)
3114 3114 >>> rs = addset(baseset([]), baseset([]))
3115 3115 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3116 3116 (False, False, None, None)
3117 3117
3118 3118 iterate unsorted:
3119 3119 >>> rs = addset(xs, ys)
3120 3120 >>> [x for x in rs] # without _genlist
3121 3121 [0, 3, 2, 5, 4]
3122 3122 >>> assert not rs._genlist
3123 3123 >>> len(rs)
3124 3124 5
3125 3125 >>> [x for x in rs] # with _genlist
3126 3126 [0, 3, 2, 5, 4]
3127 3127 >>> assert rs._genlist
3128 3128
3129 3129 iterate ascending:
3130 3130 >>> rs = addset(xs, ys, ascending=True)
3131 3131 >>> [x for x in rs], [x for x in rs.fastasc()] # without _asclist
3132 3132 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3133 3133 >>> assert not rs._asclist
3134 3134 >>> len(rs)
3135 3135 5
3136 3136 >>> [x for x in rs], [x for x in rs.fastasc()]
3137 3137 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3138 3138 >>> assert rs._asclist
3139 3139
3140 3140 iterate descending:
3141 3141 >>> rs = addset(xs, ys, ascending=False)
3142 3142 >>> [x for x in rs], [x for x in rs.fastdesc()] # without _asclist
3143 3143 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3144 3144 >>> assert not rs._asclist
3145 3145 >>> len(rs)
3146 3146 5
3147 3147 >>> [x for x in rs], [x for x in rs.fastdesc()]
3148 3148 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3149 3149 >>> assert rs._asclist
3150 3150
3151 3151 iterate ascending without fastasc:
3152 3152 >>> rs = addset(xs, generatorset(ys), ascending=True)
3153 3153 >>> assert rs.fastasc is None
3154 3154 >>> [x for x in rs]
3155 3155 [0, 2, 3, 4, 5]
3156 3156
3157 3157 iterate descending without fastdesc:
3158 3158 >>> rs = addset(generatorset(xs), ys, ascending=False)
3159 3159 >>> assert rs.fastdesc is None
3160 3160 >>> [x for x in rs]
3161 3161 [5, 4, 3, 2, 0]
3162 3162 """
3163 3163 def __init__(self, revs1, revs2, ascending=None):
3164 3164 self._r1 = revs1
3165 3165 self._r2 = revs2
3166 3166 self._iter = None
3167 3167 self._ascending = ascending
3168 3168 self._genlist = None
3169 3169 self._asclist = None
3170 3170
3171 3171 def __len__(self):
3172 3172 return len(self._list)
3173 3173
3174 3174 def __nonzero__(self):
3175 3175 return bool(self._r1) or bool(self._r2)
3176 3176
3177 3177 @util.propertycache
3178 3178 def _list(self):
3179 3179 if not self._genlist:
3180 3180 self._genlist = baseset(iter(self))
3181 3181 return self._genlist
3182 3182
3183 3183 def __iter__(self):
3184 3184 """Iterate over both collections without repeating elements
3185 3185
3186 3186 If the ascending attribute is not set, iterate over the first one and
3187 3187 then over the second one checking for membership on the first one so we
3188 3188 don't yield any duplicates.
3189 3189
3190 3190 If the ascending attribute is set, iterate over both collections at the
3191 3191 same time, yielding only one value at a time in the given order.
3192 3192 """
3193 3193 if self._ascending is None:
3194 3194 if self._genlist:
3195 3195 return iter(self._genlist)
3196 3196 def arbitraryordergen():
3197 3197 for r in self._r1:
3198 3198 yield r
3199 3199 inr1 = self._r1.__contains__
3200 3200 for r in self._r2:
3201 3201 if not inr1(r):
3202 3202 yield r
3203 3203 return arbitraryordergen()
3204 3204 # try to use our own fast iterator if it exists
3205 3205 self._trysetasclist()
3206 3206 if self._ascending:
3207 3207 attr = 'fastasc'
3208 3208 else:
3209 3209 attr = 'fastdesc'
3210 3210 it = getattr(self, attr)
3211 3211 if it is not None:
3212 3212 return it()
3213 3213 # maybe half of the component supports fast
3214 3214 # get iterator for _r1
3215 3215 iter1 = getattr(self._r1, attr)
3216 3216 if iter1 is None:
3217 3217 # let's avoid side effect (not sure it matters)
3218 3218 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3219 3219 else:
3220 3220 iter1 = iter1()
3221 3221 # get iterator for _r2
3222 3222 iter2 = getattr(self._r2, attr)
3223 3223 if iter2 is None:
3224 3224 # let's avoid side effect (not sure it matters)
3225 3225 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3226 3226 else:
3227 3227 iter2 = iter2()
3228 3228 return _iterordered(self._ascending, iter1, iter2)
3229 3229
3230 3230 def _trysetasclist(self):
3231 3231 """populate the _asclist attribute if possible and necessary"""
3232 3232 if self._genlist is not None and self._asclist is None:
3233 3233 self._asclist = sorted(self._genlist)
3234 3234
3235 3235 @property
3236 3236 def fastasc(self):
3237 3237 self._trysetasclist()
3238 3238 if self._asclist is not None:
3239 3239 return self._asclist.__iter__
3240 3240 iter1 = self._r1.fastasc
3241 3241 iter2 = self._r2.fastasc
3242 3242 if None in (iter1, iter2):
3243 3243 return None
3244 3244 return lambda: _iterordered(True, iter1(), iter2())
3245 3245
3246 3246 @property
3247 3247 def fastdesc(self):
3248 3248 self._trysetasclist()
3249 3249 if self._asclist is not None:
3250 3250 return self._asclist.__reversed__
3251 3251 iter1 = self._r1.fastdesc
3252 3252 iter2 = self._r2.fastdesc
3253 3253 if None in (iter1, iter2):
3254 3254 return None
3255 3255 return lambda: _iterordered(False, iter1(), iter2())
3256 3256
3257 3257 def __contains__(self, x):
3258 3258 return x in self._r1 or x in self._r2
3259 3259
3260 3260 def sort(self, reverse=False):
3261 3261 """Sort the added set
3262 3262
3263 3263 For this we use the cached list with all the generated values and if we
3264 3264 know they are ascending or descending we can sort them in a smart way.
3265 3265 """
3266 3266 self._ascending = not reverse
3267 3267
3268 3268 def isascending(self):
3269 3269 return self._ascending is not None and self._ascending
3270 3270
3271 3271 def isdescending(self):
3272 3272 return self._ascending is not None and not self._ascending
3273 3273
3274 3274 def reverse(self):
3275 3275 if self._ascending is None:
3276 3276 self._list.reverse()
3277 3277 else:
3278 3278 self._ascending = not self._ascending
3279 3279
3280 3280 def first(self):
3281 3281 for x in self:
3282 3282 return x
3283 3283 return None
3284 3284
3285 3285 def last(self):
3286 3286 self.reverse()
3287 3287 val = self.first()
3288 3288 self.reverse()
3289 3289 return val
3290 3290
3291 3291 def __repr__(self):
3292 3292 d = {None: '', False: '-', True: '+'}[self._ascending]
3293 3293 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3294 3294
3295 3295 class generatorset(abstractsmartset):
3296 3296 """Wrap a generator for lazy iteration
3297 3297
3298 3298 Wrapper structure for generators that provides lazy membership and can
3299 3299 be iterated more than once.
3300 3300 When asked for membership it generates values until either it finds the
3301 3301 requested one or has gone through all the elements in the generator
3302 3302 """
3303 3303 def __init__(self, gen, iterasc=None):
3304 3304 """
3305 3305 gen: a generator producing the values for the generatorset.
3306 3306 """
3307 3307 self._gen = gen
3308 3308 self._asclist = None
3309 3309 self._cache = {}
3310 3310 self._genlist = []
3311 3311 self._finished = False
3312 3312 self._ascending = True
3313 3313 if iterasc is not None:
3314 3314 if iterasc:
3315 3315 self.fastasc = self._iterator
3316 3316 self.__contains__ = self._asccontains
3317 3317 else:
3318 3318 self.fastdesc = self._iterator
3319 3319 self.__contains__ = self._desccontains
3320 3320
3321 3321 def __nonzero__(self):
3322 3322 # Do not use 'for r in self' because it will enforce the iteration
3323 3323 # order (default ascending), possibly unrolling a whole descending
3324 3324 # iterator.
3325 3325 if self._genlist:
3326 3326 return True
3327 3327 for r in self._consumegen():
3328 3328 return True
3329 3329 return False
3330 3330
3331 3331 def __contains__(self, x):
3332 3332 if x in self._cache:
3333 3333 return self._cache[x]
3334 3334
3335 3335 # Use new values only, as existing values would be cached.
3336 3336 for l in self._consumegen():
3337 3337 if l == x:
3338 3338 return True
3339 3339
3340 3340 self._cache[x] = False
3341 3341 return False
3342 3342
3343 3343 def _asccontains(self, x):
3344 3344 """version of contains optimised for ascending generator"""
3345 3345 if x in self._cache:
3346 3346 return self._cache[x]
3347 3347
3348 3348 # Use new values only, as existing values would be cached.
3349 3349 for l in self._consumegen():
3350 3350 if l == x:
3351 3351 return True
3352 3352 if l > x:
3353 3353 break
3354 3354
3355 3355 self._cache[x] = False
3356 3356 return False
3357 3357
3358 3358 def _desccontains(self, x):
3359 3359 """version of contains optimised for descending generator"""
3360 3360 if x in self._cache:
3361 3361 return self._cache[x]
3362 3362
3363 3363 # Use new values only, as existing values would be cached.
3364 3364 for l in self._consumegen():
3365 3365 if l == x:
3366 3366 return True
3367 3367 if l < x:
3368 3368 break
3369 3369
3370 3370 self._cache[x] = False
3371 3371 return False
3372 3372
3373 3373 def __iter__(self):
3374 3374 if self._ascending:
3375 3375 it = self.fastasc
3376 3376 else:
3377 3377 it = self.fastdesc
3378 3378 if it is not None:
3379 3379 return it()
3380 3380 # we need to consume the iterator
3381 3381 for x in self._consumegen():
3382 3382 pass
3383 3383 # recall the same code
3384 3384 return iter(self)
3385 3385
3386 3386 def _iterator(self):
3387 3387 if self._finished:
3388 3388 return iter(self._genlist)
3389 3389
3390 3390 # We have to use this complex iteration strategy to allow multiple
3391 3391 # iterations at the same time. We need to be able to catch revisions
3392 3392 # removed from _consumegen and added to genlist in another instance.
3393 3393 #
3394 3394 # Getting rid of it would provide about a 15% speed up on this
3395 3395 # iteration.
3396 3396 genlist = self._genlist
3397 3397 nextrev = self._consumegen().next
3398 3398 _len = len # cache global lookup
3399 3399 def gen():
3400 3400 i = 0
3401 3401 while True:
3402 3402 if i < _len(genlist):
3403 3403 yield genlist[i]
3404 3404 else:
3405 3405 yield nextrev()
3406 3406 i += 1
3407 3407 return gen()
3408 3408
3409 3409 def _consumegen(self):
3410 3410 cache = self._cache
3411 3411 genlist = self._genlist.append
3412 3412 for item in self._gen:
3413 3413 cache[item] = True
3414 3414 genlist(item)
3415 3415 yield item
3416 3416 if not self._finished:
3417 3417 self._finished = True
3418 3418 asc = self._genlist[:]
3419 3419 asc.sort()
3420 3420 self._asclist = asc
3421 3421 self.fastasc = asc.__iter__
3422 3422 self.fastdesc = asc.__reversed__
3423 3423
3424 3424 def __len__(self):
3425 3425 for x in self._consumegen():
3426 3426 pass
3427 3427 return len(self._genlist)
3428 3428
3429 3429 def sort(self, reverse=False):
3430 3430 self._ascending = not reverse
3431 3431
3432 3432 def reverse(self):
3433 3433 self._ascending = not self._ascending
3434 3434
3435 3435 def isascending(self):
3436 3436 return self._ascending
3437 3437
3438 3438 def isdescending(self):
3439 3439 return not self._ascending
3440 3440
3441 3441 def first(self):
3442 3442 if self._ascending:
3443 3443 it = self.fastasc
3444 3444 else:
3445 3445 it = self.fastdesc
3446 3446 if it is None:
3447 3447 # we need to consume all and try again
3448 3448 for x in self._consumegen():
3449 3449 pass
3450 3450 return self.first()
3451 3451 return next(it(), None)
3452 3452
3453 3453 def last(self):
3454 3454 if self._ascending:
3455 3455 it = self.fastdesc
3456 3456 else:
3457 3457 it = self.fastasc
3458 3458 if it is None:
3459 3459 # we need to consume all and try again
3460 3460 for x in self._consumegen():
3461 3461 pass
3462 3462 return self.first()
3463 3463 return next(it(), None)
3464 3464
3465 3465 def __repr__(self):
3466 3466 d = {False: '-', True: '+'}[self._ascending]
3467 3467 return '<%s%s>' % (type(self).__name__, d)
3468 3468
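# Illustrative sketch (not from this change): membership consumes the wrapped
# generator only as far as needed, and every generated value is cached so the
# set can be iterated again later.
#
#   >>> gs = generatorset(iter([0, 1, 2, 3]), iterasc=True)
#   >>> 1 in gs
#   True
#   >>> gs._genlist        # only the values generated so far
#   [0, 1]
#   >>> list(gs)
#   [0, 1, 2, 3]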
3469 3469 class spanset(abstractsmartset):
3470 3470 """Duck type for baseset class which represents a range of revisions and
3471 3471 can work lazily and without having all the range in memory
3472 3472
3473 3473 Note that spanset(x, y) behaves almost like xrange(x, y) except for two
3474 3474 notable points:
3475 3475 - when x > y it will automatically be descending,
3476 3476 - revisions filtered by this repoview will be skipped.
3477 3477
3478 3478 """
3479 3479 def __init__(self, repo, start=0, end=None):
3480 3480 """
3481 3481 start: first revision included in the set
3482 3482 (defaults to 0)
3483 3483 end: first revision excluded (last + 1)
3484 3484 (defaults to len(repo))
3485 3485
3486 3486 Spanset will be descending if `end` < `start`.
3487 3487 """
3488 3488 if end is None:
3489 3489 end = len(repo)
3490 3490 self._ascending = start <= end
3491 3491 if not self._ascending:
3492 3492 start, end = end + 1, start + 1
3493 3493 self._start = start
3494 3494 self._end = end
3495 3495 self._hiddenrevs = repo.changelog.filteredrevs
3496 3496
3497 3497 def sort(self, reverse=False):
3498 3498 self._ascending = not reverse
3499 3499
3500 3500 def reverse(self):
3501 3501 self._ascending = not self._ascending
3502 3502
3503 3503 def _iterfilter(self, iterrange):
3504 3504 s = self._hiddenrevs
3505 3505 for r in iterrange:
3506 3506 if r not in s:
3507 3507 yield r
3508 3508
3509 3509 def __iter__(self):
3510 3510 if self._ascending:
3511 3511 return self.fastasc()
3512 3512 else:
3513 3513 return self.fastdesc()
3514 3514
3515 3515 def fastasc(self):
3516 3516 iterrange = xrange(self._start, self._end)
3517 3517 if self._hiddenrevs:
3518 3518 return self._iterfilter(iterrange)
3519 3519 return iter(iterrange)
3520 3520
3521 3521 def fastdesc(self):
3522 3522 iterrange = xrange(self._end - 1, self._start - 1, -1)
3523 3523 if self._hiddenrevs:
3524 3524 return self._iterfilter(iterrange)
3525 3525 return iter(iterrange)
3526 3526
3527 3527 def __contains__(self, rev):
3528 3528 hidden = self._hiddenrevs
3529 3529 return ((self._start <= rev < self._end)
3530 3530 and not (hidden and rev in hidden))
3531 3531
3532 3532 def __nonzero__(self):
3533 3533 for r in self:
3534 3534 return True
3535 3535 return False
3536 3536
3537 3537 def __len__(self):
3538 3538 if not self._hiddenrevs:
3539 3539 return abs(self._end - self._start)
3540 3540 else:
3541 3541 count = 0
3542 3542 start = self._start
3543 3543 end = self._end
3544 3544 for rev in self._hiddenrevs:
3545 3545 if (end < rev <= start) or (start <= rev < end):
3546 3546 count += 1
3547 3547 return abs(self._end - self._start) - count
3548 3548
3549 3549 def isascending(self):
3550 3550 return self._ascending
3551 3551
3552 3552 def isdescending(self):
3553 3553 return not self._ascending
3554 3554
3555 3555 def first(self):
3556 3556 if self._ascending:
3557 3557 it = self.fastasc
3558 3558 else:
3559 3559 it = self.fastdesc
3560 3560 for x in it():
3561 3561 return x
3562 3562 return None
3563 3563
3564 3564 def last(self):
3565 3565 if self._ascending:
3566 3566 it = self.fastdesc
3567 3567 else:
3568 3568 it = self.fastasc
3569 3569 for x in it():
3570 3570 return x
3571 3571 return None
3572 3572
3573 3573 def __repr__(self):
3574 3574 d = {False: '-', True: '+'}[self._ascending]
3575 3575 return '<%s%s %d:%d>' % (type(self).__name__, d,
3576 3576 self._start, self._end - 1)
3577 3577
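# Illustrative note (not from this change): spanset(repo, 5, 2) normalizes to
# _start=3, _end=6 with _ascending=False, so (ignoring hidden revisions) its
# iteration order matches:
#
#   >>> list(xrange(5, 2, -1))
#   [5, 4, 3]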
3578 3578 class fullreposet(spanset):
3579 3579 """a set containing all revisions in the repo
3580 3580
3581 3581 This class exists to host special optimization and magic to handle virtual
3582 3582 revisions such as "null".
3583 3583 """
3584 3584
3585 3585 def __init__(self, repo):
3586 3586 super(fullreposet, self).__init__(repo)
3587 3587
3588 3588 def __and__(self, other):
3589 3589 """As self contains the whole repo, all of the other set should also be
3590 3590 in self. Therefore `self & other = other`.
3591 3591
3592 3592 This boldly assumes the other contains valid revs only.
3593 3593 """
3594 3594 # other is not a smartset, make it so
3595 3595 if not util.safehasattr(other, 'isascending'):
3596 3596 # filter out hidden revisions
3597 3597 # (this boldly assumes all smartsets are pure)
3598 3598 #
3599 3599 # `other` was used with "&", let's assume this is a set like
3600 3600 # object.
3601 3601 other = baseset(other - self._hiddenrevs)
3602 3602
3603 3603 # XXX As fullreposet is also used as bootstrap, this is wrong.
3604 3604 #
3605 3605 # With a giveme312() revset returning [3,1,2], this makes
3606 3606 # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
3607 3607 # We cannot just drop it because other usages still need to sort it:
3608 3608 # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
3609 3609 #
3610 3610 # There are also some faulty revset implementations that rely on it
3611 3611 # (eg: children as of its state in e8075329c5fb)
3612 3612 #
3613 3613 # When we fix the two points above we can move this into the if clause
3614 3614 other.sort(reverse=self.isdescending())
3615 3615 return other
3616 3616
3617 3617 def prettyformatset(revs):
3618 3618 lines = []
3619 3619 rs = repr(revs)
3620 3620 p = 0
3621 3621 while p < len(rs):
3622 3622 q = rs.find('<', p + 1)
3623 3623 if q < 0:
3624 3624 q = len(rs)
3625 3625 l = rs.count('<', 0, p) - rs.count('>', 0, p)
3626 3626 assert l >= 0
3627 3627 lines.append((l, rs[p:q].rstrip()))
3628 3628 p = q
3629 3629 return '\n'.join(' ' * l + s for l, s in lines)
3630 3630
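# Illustrative sketch (not from this change): nested smartset reprs are
# indented one space per nesting level, which makes composed sets readable
# when debugging revset evaluation.
#
#   >>> print prettyformatset(addset(baseset([1, 2]), baseset([3])))
#   <addset
#    <baseset [1, 2]>,
#    <baseset [3]>>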
3631 def loadpredicate(ui, extname, registrarobj):
3632 """Load revset predicates from specified registrarobj
3633 """
3634 for name, func in registrarobj._table.iteritems():
3635 symbols[name] = func
3636 if func._safe:
3637 safesymbols.add(name)
3638
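# Hypothetical extension-side sketch (an assumption about the new registrar
# API, not shown in this hunk): predicates are declared through a
# registrar.revsetpredicate instance and handed to loadpredicate so they end
# up in 'symbols' (and in 'safesymbols' when marked safe).
#
#   from mercurial import registrar
#   revsetpredicate = registrar.revsetpredicate()
#
#   @revsetpredicate('evenrev()')
#   def evenrev(repo, subset, x):
#       """Changesets with an even revision number."""
#       return subset.filter(lambda r: r % 2 == 0)
#
#   # later, e.g. from the extension loading machinery:
#   # loadpredicate(ui, 'myext', revsetpredicate)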
3631 3639 # tell hggettext to extract docstrings from these functions:
3632 3640 i18nfunctions = symbols.values()