##// END OF EJS Templates
revset: use absolute_import
Gregory Szorc -
r25971:e9cd028f default
parent child Browse files
Show More
@@ -1,3688 +1,3699 b''
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 import re
9 import parser, util, error, hbisect, phases
10 import node
8 from __future__ import absolute_import
9
11 10 import heapq
12 import match as matchmod
13 from i18n import _
14 import encoding
15 import obsolete as obsmod
16 import pathutil
17 import repoview
11 import re
12
13 from .i18n import _
14 from . import (
15 encoding,
16 error,
17 hbisect,
18 match as matchmod,
19 node,
20 obsolete as obsmod,
21 parser,
22 pathutil,
23 phases,
24 repoview,
25 util,
26 )
18 27
def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst."""
    # cut=1 truncates parentrevs() tuples to the first parent only
    if followfirst:
        cut = 1
    else:
        cut = None
    cl = repo.changelog

    def iterate():
        # Lazily emit ancestors in descending revision order. heapq is a
        # min-heap, so revisions are negated to get max-heap behavior.
        revs.sort(reverse=True)
        irevs = iter(revs)
        h = []

        inputrev = next(irevs, None)
        if inputrev is not None:
            heapq.heappush(h, -inputrev)

        seen = set()
        while h:
            current = -heapq.heappop(h)
            # once the traversal reaches the next input rev, merge it (and
            # subsequently its ancestors) into the heap as well
            if current == inputrev:
                inputrev = next(irevs, None)
                if inputrev is not None:
                    heapq.heappush(h, -inputrev)
            if current not in seen:
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)
51 60
def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst."""
    # cut=1 truncates parentrevs() tuples to the first parent only
    if followfirst:
        cut = 1
    else:
        cut = None

    def iterate():
        cl = repo.changelog
        # XXX this should be 'parentset.min()' assuming 'parentset' is a
        # smartset (and if it is not, it should.)
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            # single ascending sweep: a rev is a descendant iff one of its
            # (possibly cut) parents was already collected in 'seen'
            seen = set(revs)
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)
80 89
def _revsbetween(repo, roots, heads):
    """Return all paths between roots and heads, inclusive of both endpoint
    sets."""
    if not roots:
        return baseset()
    parentrevs = repo.changelog.parentrevs
    visit = list(heads)
    reachable = set()
    seen = {}
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    minroot = min(roots)
    roots = set(roots)
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            # revs below minroot can never be on a roots->heads path
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    # sweep upward: a rev is on a path if any of its parents is reachable
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return baseset(sorted(reachable))
116 125
# Parsing table for the revset grammar, consumed by the generic parser in
# parser.py. A None entry means the token cannot appear in that role.
elements = {
    # token-type: binding-strength, primary, prefix, infix, suffix
    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
    "##": (20, None, None, ("_concat", 20), None),
    "~": (18, None, None, ("ancestor", 18), None),
    "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
    "-": (5, None, ("negate", 19), ("minus", 5), None),
    "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
    "not": (10, None, ("not", 10), None, None),
    "!": (10, None, ("not", 10), None, None),
    "and": (5, None, None, ("and", 5), None),
    "&": (5, None, None, ("and", 5), None),
    "%": (5, None, None, ("only", 5), ("onlypost", 5)),
    "or": (4, None, None, ("or", 4), None),
    "|": (4, None, None, ("or", 4), None),
    "+": (4, None, None, ("or", 4), None),
    "=": (3, None, None, ("keyvalue", 3), None),
    ",": (2, None, None, ("list", 2), None),
    ")": (0, None, None, None, None),
    "symbol": (0, "symbol", None, None, None),
    "string": (0, "string", None, None, None),
    "end": (0, None, None, None, None),
}
144 153
# words the tokenizer must treat as operators rather than symbols
keywords = set(['and', 'or', 'not'])

# default set of valid characters for the initial letter of symbols
# (bytes > 127 are accepted so non-ASCII names pass through untokenized)
_syminitletters = set(c for c in [chr(i) for i in xrange(256)]
                      if c.isalnum() or c in '._@' or ord(c) > 127)

# default set of valid characters for non-initial letters of symbols
_symletters = set(c for c in [chr(i) for i in xrange(256)]
                  if c.isalnum() or c in '-._/@' or ord(c) > 127)
154 163
def tokenize(program, lookup=None, syminitletters=None, symletters=None):
    '''
    Parse a revset statement into a stream of tokens

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
        symletters = _symletters

    if program and lookup:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        parts = program.split(':', 1)
        if all(lookup(sym) for sym in parts if sym):
            if parts[0]:
                yield ('symbol', parts[0], 0)
            if len(parts) > 1:
                s = len(parts[0])
                yield (':', None, s)
                if parts[1]:
                    yield ('symbol', parts[1], s + 1)
            yield ('end', None, len(program))
            return

    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():=,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            # r-prefixed strings are kept raw; plain strings are unescaped
            if c == 'r':
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = lambda x: x.decode('string-escape')
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c in syminitletters:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if d not in symletters:
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression: re-emit as symbols separated
                    # by minus operators
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                        s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            # compensate for the unconditional pos += 1 below
            pos -= 1
        else:
            raise error.ParseError(_("syntax error in revset '%s'") %
                                   program, pos)
        pos += 1
    yield ('end', None, pos)
272 281
def parseerrordetail(inst):
    """Compose error message from specified ParseError object
    """
    # a two-element args tuple carries (message, position)
    if len(inst.args) <= 1:
        return inst.args[0]
    message, pos = inst.args[0], inst.args[1]
    return _('at %s: %s') % (pos, message)
280 289
281 290 # helpers
282 291
def getstring(x, err):
    # Extract the literal value from a 'string' or 'symbol' parse node;
    # anything else raises a ParseError carrying `err`.
    if x and x[0] in ('string', 'symbol'):
        return x[1]
    raise error.ParseError(err)
287 296
def getlist(x):
    # Flatten a left-nested ('list', head, item) parse tree into a flat
    # Python list of nodes. None/empty yields [], a lone node yields [node].
    if not x:
        return []
    if x[0] != 'list':
        return [x]
    return getlist(x[1]) + [x[2]]
294 303
def getargs(x, min, max, err):
    """Flatten argument tree ``x`` and check the count lies in [min, max].

    A negative ``max`` disables the upper bound; an arity violation
    raises ParseError(err).
    """
    # inline flattening of left-nested ('list', ...) nodes (see getlist)
    args = []
    cur = x
    while cur and cur[0] == 'list':
        args.append(cur[2])
        cur = cur[1]
    if cur:
        args.append(cur)
    args.reverse()
    if len(args) < min or (max >= 0 and len(args) > max):
        raise error.ParseError(err)
    return args
300 309
def getargsdict(x, funcname, keys):
    # Map the (positional and key=value) argument nodes of a function call
    # onto a name -> node dict, validated against the space-separated `keys`.
    arglist = getlist(x)
    keynames = keys.split()
    return parser.buildargsdict(arglist, funcname, keynames,
                                keyvaluenode='keyvalue', keynode='symbol')
304 313
def isvalidsymbol(tree):
    """Examine whether specified ``tree`` is valid ``symbol`` or not
    """
    # check the tag first so an untagged node never has its length tested
    result = tree[0] == 'symbol'
    if result:
        result = len(tree) > 1
    return result
309 318
def getsymbol(tree):
    """Get symbol name from valid ``symbol`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidsymbol``.
    """
    name = tree[1]
    return name
316 325
def isvalidfunc(tree):
    """Examine whether specified ``tree`` is valid ``func`` or not
    """
    if tree[0] != 'func' or len(tree) <= 1:
        return False
    # the first operand must itself be a well-formed symbol node
    sym = tree[1]
    return sym[0] == 'symbol' and len(sym) > 1
321 330
def getfuncname(tree):
    """Get function name from valid ``func`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidfunc``.
    """
    # the name lives in the nested ('symbol', name) node
    return tree[1][1]
328 337
def getfuncargs(tree):
    """Get list of function arguments from valid ``func`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidfunc``.
    """
    if len(tree) <= 2:
        # no argument node at all
        return []
    # flatten left-nested ('list', ...) argument nodes, like getlist()
    args = []
    cur = tree[2]
    while cur and cur[0] == 'list':
        args.append(cur[2])
        cur = cur[1]
    if cur:
        args.append(cur)
    args.reverse()
    return args
338 347
def getset(repo, subset, x):
    # Evaluate parsed tree node `x` against `subset` by dispatching on the
    # node type through the module-level `methods` table; the result is
    # always coerced to a smartset.
    if not x:
        raise error.ParseError(_("missing argument"))
    s = methods[x[0]](repo, subset, *x[1:])
    if util.safehasattr(s, 'isascending'):
        # already a smartset; pass through unchanged
        return s
    if (repo.ui.configbool('devel', 'all-warnings')
        or repo.ui.configbool('devel', 'old-revset')):
        # else case should not happen, because all non-func are internal,
        # ignoring for now.
        if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
            repo.ui.develwarn('revset "%s" use list instead of smartset, '
                              '(upgrade your code)' % x[1][1])
    # legacy predicates may return plain lists; wrap them
    return baseset(s)
353 362
def _getrevsource(repo, r):
    # Return the revision recorded as the graft, transplant or rebase
    # source of r, or None if none is recorded (or no longer resolvable).
    extra = repo[r].extra()
    for key in ('source', 'transplant_source', 'rebase_source'):
        if key not in extra:
            continue
        try:
            return repo[extra[key]].rev()
        except error.RepoLookupError:
            # recorded source does not exist in this repository
            pass
    return None
363 372
364 373 # operator methods
365 374
def stringset(repo, subset, x):
    # Resolve a revision identifier to a one-element set (or the empty set
    # if the revision is outside `subset`).
    rev = repo[x].rev()
    contained = rev in subset
    if not contained and rev == node.nullrev:
        # the null revision is implicitly part of a fullreposet
        contained = isinstance(subset, fullreposet)
    if contained:
        return baseset([rev])
    return baseset()
372 381
def rangeset(repo, subset, x, y):
    # Implement the 'x:y' operator: all revisions between the first rev of
    # x and the last rev of y, in x-to-y direction, limited to `subset`.
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    m, n = m.first(), n.last()

    if m == n:
        r = baseset([m])
    elif n == node.wdirrev:
        # the working directory pseudo-rev sorts after every real rev
        r = spanset(repo, m, len(repo)) + baseset([n])
    elif m == node.wdirrev:
        r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
    elif m < n:
        r = spanset(repo, m, n + 1)
    else:
        # descending range (y precedes x)
        r = spanset(repo, m, n - 1)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    #
    # This has performance implication, carrying the sorting over when possible
    # would be more efficient.
    return r & subset
397 406
def dagrange(repo, subset, x, y):
    # Implement 'x::y': every changeset on a DAG path from x to y,
    # including both endpoints.
    everything = fullreposet(repo)
    roots = getset(repo, everything, x)
    heads = getset(repo, everything, y)
    between = _revsbetween(repo, roots, heads)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return between & subset
404 413
def andset(repo, subset, x, y):
    # Intersection: evaluate y within the set produced by evaluating x.
    lhs = getset(repo, subset, x)
    return getset(repo, lhs, y)
407 416
def orset(repo, subset, *xs):
    # Union of one or more operand trees, combined pairwise in a balanced
    # divide-and-conquer fashion to keep recursion depth logarithmic.
    assert xs
    if len(xs) == 1:
        return getset(repo, subset, xs[0])
    mid = len(xs) // 2
    left = orset(repo, subset, *xs[:mid])
    right = orset(repo, subset, *xs[mid:])
    return left + right
416 425
def notset(repo, subset, x):
    # Set difference: everything in subset that x does not match.
    excluded = getset(repo, subset, x)
    return subset - excluded
419 428
def listset(repo, subset, a, b):
    # 'list' nodes are only meaningful as function arguments; reaching this
    # method means a bare comma-separated list was used as an expression.
    raise error.ParseError(_("can't use a list in this context"))
422 431
def keyvaluepair(repo, subset, k, v):
    # 'key=value' nodes are only meaningful inside function argument lists;
    # anywhere else they are a syntax error.
    raise error.ParseError(_("can't use a key-value pair in this context"))
425 434
def func(repo, subset, a, b):
    # Dispatch a parsed function call to its predicate in `symbols`.
    if a[0] == 'symbol' and a[1] in symbols:
        return symbols[a[1]](repo, subset, b)

    # unknown function: suggest only documented (public) predicates
    documented = [name for (name, fn) in symbols.items()
                  if getattr(fn, '__doc__', None) is not None]
    raise error.UnknownIdentifier(a[1], documented)
434 443
435 444 # functions
436 445
def adds(repo, subset, x):
    """``adds(pattern)``
    Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pattern = getstring(x, _("adds requires a pattern"))
    # index 1 of repo.status() output is the list of added files
    return checkstatus(repo, subset, pattern, 1)
448 457
def ancestor(repo, subset, x):
    """``ancestor(*changeset)``
    A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # fold ancestor() pairwise across every revision of every argument set
    for arg in l:
        for r in getset(repo, rl, arg):
            if anc is None:
                anc = repo[r]
            else:
                anc = anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()
473 482
def _ancestors(repo, subset, x, followfirst=False):
    # Shared implementation of ancestors()/_firstancestors().
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    return subset & _revancestors(repo, heads, followfirst)
480 489
def ancestors(repo, subset, x):
    """``ancestors(set)``
    Changesets that are ancestors of a changeset in set.
    """
    # thin wrapper; the real work happens in _ancestors/_revancestors
    return _ancestors(repo, subset, x)
486 495
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    # (documented via comments, not a docstring, so it stays out of help)
    return _ancestors(repo, subset, x, followfirst=True)
491 500
def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    ancs = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        # walk n steps up the first-parent chain
        steps = n
        while steps > 0:
            r = cl.parentrevs(r)[0]
            steps -= 1
        ancs.add(r)
    return subset & ancs
508 517
def author(repo, subset, x):
    """``author(string)``
    Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    needle = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(needle)
    return subset.filter(lambda r: matcher(encoding.lower(repo[r].user())))
517 526
def bisect(repo, subset, x):
    """``bisect(string)``
    Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads`` : csets topologically good/bad
    - ``range`` : csets taking part in the bisection
    - ``pruned`` : csets that are goods, bads or skipped
    - ``untested`` : csets whose fate is yet unknown
    - ``ignored`` : csets ignored due to DAG topology
    - ``current`` : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    # hbisect.get() resolves a status name to the matching revisions
    return subset & set(hbisect.get(repo, status))
534 543
# Backward-compatibility
# - no help entry so that we do not advertise it any more
def bisected(repo, subset, x):
    # deprecated alias: delegates unchanged to bisect()
    return bisect(repo, subset, x)
539 548
def bookmark(repo, subset, x):
    """``bookmark([name])``
    The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = _stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            # exact name: a missing bookmark is a hard error
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % bm)
            bms.add(repo[bmrev].rev())
        else:
            # pattern match against every bookmark name
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        # no argument: every bookmarked revision
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    bms -= set([node.nullrev])
    return subset & bms
577 586
def branch(repo, subset, x):
    """``branch(string or set)``
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = _stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]))
        else:
            # regex/other pattern kinds always filter by branch name
            return subset.filter(lambda r: matcher(getbi(r)[0]))

    # argument is a revset: select those revs plus everything on their
    # branches
    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b)
610 619
def bumped(repo, subset, x):
    """``bumped()``
    Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    return subset & obsmod.getrevs(repo, 'bumped')
621 630
def bundle(repo, subset, x):
    """``bundle()``
    Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        # only a bundlerepo changelog carries the 'bundlerevs' attribute
        revs = repo.changelog.bundlerevs
    except AttributeError:
        raise util.Abort(_("no bundle provided - specify with -R"))
    return subset & revs
633 642
def checkstatus(repo, subset, pat, field):
    # Select revisions whose status list `field` (0=modified, 1=added,
    # 2=removed in repo.status() ordering) contains a file matching `pat`.
    hasset = matchmod.patkind(pat) == 'set'

    # cache the matcher across revisions; fileset patterns ('set') depend
    # on the changectx, so they must be rebuilt for every revision
    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            # single literal filename: cheap membership tests suffice
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches)
665 674
def _children(repo, narrow, parentset):
    # Return the members of `narrow` with at least one parent in `parentset`.
    if not parentset:
        return baseset()
    found = set()
    parentrevsof = repo.changelog.parentrevs
    minrev = parentset.min()
    for r in narrow:
        # a child always has a higher revision number than its parents
        if r <= minrev:
            continue
        if any(p in parentset for p in parentrevsof(r)):
            found.add(r)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    return baseset(found)
681 690
def children(repo, subset, x):
    """``children(set)``
    Child changesets of changesets in set.
    """
    parents = getset(repo, fullreposet(repo), x)
    return subset & _children(repo, subset, parents)
689 698
def closed(repo, subset, x):
    """``closed()``
    Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))

    def isclosed(r):
        return repo[r].closesbranch()

    return subset.filter(isclosed)
697 706
def contains(repo, subset, x):
    """``contains(pattern)``
    The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(rev):
        if not matchmod.patkind(pat):
            # plain path: one manifest membership test is enough
            cpath = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            return cpath in repo[rev]
        ctx = repo[rev]
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
        return any(m(f) for f in ctx.manifest())

    return subset.filter(matches)
724 733
def converted(repo, subset, x):
    """``converted([id])``
    Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    args = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if args:
        # i18n: "converted" is a keyword
        rev = getstring(args[0], _('converted requires a revision'))

    def _matchvalue(r):
        source = repo[r].extra().get('convert_revision', None)
        return source is not None and (rev is None or source.startswith(rev))

    return subset.filter(_matchvalue)
746 755
def date(repo, subset, x):
    """``date(interval)``
    Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    spec = getstring(x, _("date requires a string"))
    matchfn = util.matchdate(spec)
    # date()[0] is the unix timestamp of the changeset
    return subset.filter(lambda r: matchfn(repo[r].date()[0]))
755 764
def desc(repo, subset, x):
    """``desc(string)``
    Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    needle = encoding.lower(getstring(x, _("desc requires a string")))

    def matches(r):
        haystack = encoding.lower(repo[r].description())
        return needle in haystack

    return subset.filter(matches)
768 777
def _descendants(repo, subset, x, followfirst=False):
    # Shared implementation of descendants()/_firstdescendants().
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        # unordered subset: fall back to an intersection to restore its order
        result = subset & result
    return result
787 796
def descendants(repo, subset, x):
    """``descendants(set)``
    Changesets which are descendants of changesets in set.
    """
    # thin wrapper; the real work happens in _descendants/_revdescendants
    return _descendants(repo, subset, x)
793 802
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    # (documented via comments, not a docstring, so it stays out of help)
    return _descendants(repo, subset, x, followfirst=True)
798 807
def destination(repo, subset, x):
    """``destination([set])``
    Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source. Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be. Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set. Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset. Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            # walk one step further back along the source chain
            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__)
842 851
def divergent(repo, subset, x):
    """``divergent()``
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    return subset & obsmod.getrevs(repo, 'divergent')
851 860
def extinct(repo, subset, x):
    """``extinct()``
    Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    return subset & obsmod.getrevs(repo, 'extinct')
860 869
def extra(repo, subset, x):
    """``extra(label, [value])``
    Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        kind, value, matcher = _stringmatcher(value)

    def hasextra(r):
        extradict = repo[r].extra()
        if label not in extradict:
            return False
        return value is None or matcher(extradict[label])

    return subset.filter(hasextra)
890 899
def filelog(repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        # plain path: resolve it exactly, no directory walk needed
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        # pattern: match against the files tracked in the working copy
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        backrevref = {} # final value for: filerev -> changerev
        lowestchild = {} # lowest known filerev child of a filerev
        delayed = [] # filerev with filtered linkrev, for post-processing
        lowesthead = None # cache for manifest content of all head revisions
        fl = repo.file(f)
        for fr in list(fl):
            rev = fl.linkrev(fr)
            if rev not in cl:
                # changerev pointed in linkrev is filtered
                # record it for post processing.
                delayed.append((fr, rev))
                continue
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

        # Post-processing of all filerevs we skipped because they were
        # filtered. If such filerevs have known and unfiltered children, this
        # means they have an unfiltered appearance out there. We'll use linkrev
        # adjustment to find one of these appearances. The lowest known child
        # will be used as a starting point because it is the best upper-bound we
        # have.
        #
        # This approach will fail when an unfiltered but linkrev-shadowed
        # appearance exists in a head changeset without unfiltered filerev
        # children anywhere.
        while delayed:
            # must be a descending iteration. To slowly fill lowest child
            # information that is of potential use by the next item.
            fr, rev = delayed.pop()
            lkr = rev

            child = lowestchild.get(fr)

            if child is None:
                # search for existence of this file revision in a head revision.
                # There are three possibilities:
                # - the revision exists in a head and we can find an
                #   introduction from there,
                # - the revision does not exist in a head because it has been
                #   changed since its introduction: we would have found a child
                #   and be in the other 'else' clause,
                # - all versions of the revision are hidden.
                if lowesthead is None:
                    # lazily build the filerev -> head-node table, shared by
                    # every delayed entry for this file
                    lowesthead = {}
                    for h in repo.heads():
                        fnode = repo[h].manifest().get(f)
                        if fnode is not None:
                            lowesthead[fl.rev(fnode)] = h
                headrev = lowesthead.get(fr)
                if headrev is None:
                    # content is nowhere unfiltered
                    continue
                rev = repo[headrev][f].introrev()
            else:
                # the lowest known child is a good upper bound
                childcrev = backrevref[child]
                # XXX this does not guarantee returning the lowest
                # introduction of this revision, but this gives a
                # result which is a good start and will fit in most
                # cases. We probably need to fix the multiple
                # introductions case properly (report each
                # introduction, even for identical file revisions)
                # once and for all at some point anyway.
                for p in repo[childcrev][f].parents():
                    if p.filerev() == fr:
                        rev = p.rev()
                        break
                if rev == lkr: # no shadowed entry found
                    # XXX This should never happen unless some manifest points
                    # to biggish file revisions (like a revision that uses a
                    # parent that never appears in the manifest ancestors)
                    continue

            # Fill the data for the next iteration.
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

    return subset & s
1005 1014
def first(repo, subset, x):
    """``first(set, [n])``
    An alias for limit().
    """
    # pure delegation; limit() performs its own argument validation
    return limit(repo, subset, x)
1011 1020
def _follow(repo, subset, x, name, followfirst=False):
    # Shared implementation behind follow() and _followfirst(); 'name' is
    # only used to build error messages.
    args = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
    wctx = repo['.']
    if not args:
        # no filename given: ancestors of the working directory's first parent
        return subset & _revancestors(repo, baseset([wctx.rev()]), followfirst)

    fname = getstring(args[0], _("%s expected a filename") % name)
    if fname not in wctx:
        return baseset()
    fctx = wctx[fname]
    s = set(c.rev() for c in fctx.ancestors(followfirst=followfirst))
    # include the revision responsible for the most recent version
    s.add(fctx.introrev())
    return subset & s
1028 1037
def follow(repo, subset, x):
    """``follow([file])``
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If a filename is specified, the history of the given file is followed,
    including copies.
    """
    # the shared helper handles both the bare and the filename form
    return _follow(repo, subset, x, 'follow')
1036 1045
def _followfirst(repo, subset, x):
    # ``followfirst([file])``
    # Like ``follow([file])`` but follows only the first parent of
    # every revision or file revision.
    # internal-only predicate (leading underscore): not exposed in help
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
1042 1051
def getall(repo, subset, x):
    """``all()``
    All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    return subset & spanset(repo) # drop "null" if any
1050 1059
def grep(repo, subset, x):
    """``grep(regex)``
    Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        rx = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(r):
        # search changed file names, the user and the description
        ctx = repo[r]
        return any(rx.search(t)
                   for t in ctx.files() + [ctx.user(), ctx.description()])

    return subset.filter(matches)
1071 1080
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    # i18n: "_matchfiles" is a keyword
    l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        # i18n: "_matchfiles" is a keyword
        s = getstring(arg, _("_matchfiles requires string arguments"))
        # two-character prefix selects the argument kind; rest is the payload
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'revision'))
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'default mode'))
            default = value
        else:
            # i18n: "_matchfiles" is a keyword
            raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
    if not default:
        default = 'glob'

    # build the match object once; it is reused for every candidate revision
    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    def matches(x):
        for f in repo[x].files():
            if m(f):
                return True
        return False

    return subset.filter(matches)
1128 1137
def hasfile(repo, subset, x):
    """``file(pattern)``
    Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    # delegate to the generic matcher using a plain 'p:' pattern argument
    return _matchfiles(repo, subset, ('string', 'p:' + pat))
1141 1150
def head(repo, subset, x):
    """``head()``
    Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    torev = repo.changelog.rev
    hs = set()
    # collect the head nodes of every named branch, converted to revs
    for branch, nodes in repo.branchmap().iteritems():
        for n in nodes:
            hs.add(torev(n))
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return baseset(hs) & subset
1157 1166
def heads(repo, subset, x):
    """``heads(set)``
    Members of set with no children in set.
    """
    # a head of the set is any member that is not a parent of another member
    return getset(repo, subset, x) - parents(repo, subset, x)
1165 1174
def hidden(repo, subset, x):
    """``hidden()``
    Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    # the revisions filtered out of the 'visible' repoview
    hiddenrevs = repoview.filterrevs(repo, 'visible')
    return subset & hiddenrevs
1174 1183
def keyword(repo, subset, x):
    """``keyword(string)``
    Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        c = repo[r]
        # scan changed file names, then the user, then the description
        for t in c.files() + [c.user(), c.description()]:
            if kw in encoding.lower(t):
                return True
        return False

    return subset.filter(matches)
1189 1198
def limit(repo, subset, x):
    """``limit(set, [n])``
    First n members of set, defaulting to 1.
    """
    # i18n: "limit" is a keyword
    l = getargs(x, 1, 2, _("limit requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "limit" is a keyword
            lim = int(getstring(l[1], _("limit requires a number")))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    os = getset(repo, fullreposet(repo), l[0])
    result = []
    it = iter(os)
    # examine (at most) the first 'lim' members of 'os', keeping those
    # that are also in 'subset', in the order of 'os'
    for i in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result)
1215 1224
def last(repo, subset, x):
    """``last(set, [n])``
    Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
    except (TypeError, ValueError):
        # i18n: "last" is a keyword
        raise error.ParseError(_("last expects a number"))
    os = getset(repo, fullreposet(repo), l[0])
    os.reverse()
    result = []
    it = iter(os)
    # examine (at most) the last 'lim' members of 'os' via the reversed
    # iterator, keeping those that are also in 'subset'
    for i in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result)
1242 1251
def maxrev(repo, subset, x):
    """``max(set)``
    Changeset with highest revision number in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    if not candidates:
        return baseset()
    m = candidates.max()
    if m not in subset:
        # the maximum was filtered out of the subset
        return baseset()
    return baseset([m])
1253 1262
def merge(repo, subset, x):
    """``merge()``
    Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    cl = repo.changelog
    # a merge has a non-null (-1) second parent
    return subset.filter(lambda r: cl.parentrevs(r)[1] != -1)
1262 1271
def branchpoint(repo, subset, x):
    """``branchpoint()``
    Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    # childcount[r - baserev] counts how many revisions name r as a parent
    childcount = [0] * (len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                childcount[p - baserev] += 1
    return subset.filter(lambda r: childcount[r - baserev] > 1)
1281 1290
def minrev(repo, subset, x):
    """``min(set)``
    Changeset with lowest revision number in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    if not candidates:
        return baseset()
    m = candidates.min()
    if m not in subset:
        # the minimum was filtered out of the subset
        return baseset()
    return baseset([m])
1292 1301
def modifies(repo, subset, x):
    """``modifies(pattern)``
    Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    # status index 0 selects the 'modified' field (see checkstatus)
    return checkstatus(repo, subset, pat, 0)
1304 1313
def named(repo, subset, x):
    """``named(namespace)``
    The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = _stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        # exact namespace name: it must exist
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        # pattern form: collect every namespace whose name matches
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    # gather the revisions bound to any non-deprecated name in the
    # selected namespaces
    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    names -= set([node.nullrev])
    return subset & names
1342 1351
def node_(repo, subset, x):
    """``id(string)``
    Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    args = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    hexprefix = getstring(args[0], _("id requires a string"))
    cl = repo.changelog
    rn = None
    if len(hexprefix) == 40:
        # full-length hash: exact changelog lookup
        try:
            rn = cl.rev(node.bin(hexprefix))
        except (LookupError, TypeError):
            pass
    else:
        # shorter prefix: resolve it unambiguously against the changelog
        matched = cl._partialmatch(hexprefix)
        if matched is not None:
            rn = cl.rev(matched)

    if rn is None:
        return baseset()
    return baseset([rn]) & subset
1366 1375
def obsolete(repo, subset, x):
    """``obsolete()``
    Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    # precomputed 'obsolete' revision set from the obsolescence store
    obsoletes = obsmod.getrevs(repo, 'obsolete')
    return subset & obsoletes
1374 1383
def only(repo, subset, x):
    """``only(set, [set])``
    Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        # exclude every head that is neither in the included set nor a
        # descendant of it
        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if rev not in descendants and rev not in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & results
1400 1409
def origin(repo, subset, x):
    """``origin([set])``
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is None:
        dests = fullreposet(repo)
    else:
        dests = getset(repo, fullreposet(repo), x)

    def _firstsrc(rev):
        # walk the recorded source chain back to its first link
        src = _getrevsource(repo, rev)
        if src is None:
            return None
        while True:
            earlier = _getrevsource(repo, src)
            if earlier is None:
                return src
            src = earlier

    o = set([_firstsrc(r) for r in dests])
    o.discard(None)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & o
1431 1440
def outgoing(repo, subset, x):
    """``outgoing([path])``
    Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = getstring(l[0], _("outgoing requires a repository path")) if l else ''
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # buffer ui output so remote chatter from discovery is not shown
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = set([cl.rev(r) for r in outgoing.missing])
    return subset & o
1456 1467
def p1(repo, subset, x):
    """``p1([set])``
    First parent of changesets in set, or the working directory.
    """
    if x is None:
        # working directory: its single first parent, unless null
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    cl = repo.changelog
    ps = set(cl.parentrevs(r)[0]
             for r in getset(repo, fullreposet(repo), x))
    ps -= set([node.nullrev])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
1475 1486
def p2(repo, subset, x):
    """``p2([set])``
    Second parent of changesets in set, or the working directory.
    """
    if x is None:
        parents = repo[x].parents()
        if len(parents) < 2:
            # not merging: there is no second parent
            return baseset()
        p = parents[1].rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    cl = repo.changelog
    ps = set(cl.parentrevs(r)[1]
             for r in getset(repo, fullreposet(repo), x))
    ps -= set([node.nullrev])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
1498 1509
def parents(repo, subset, x):
    """``parents([set])``
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        ps = set(p.rev() for p in repo[x].parents())
    else:
        ps = set()
        cl = repo.changelog
        # hoist bound methods out of the loop
        addall = ps.update
        parentrevs = cl.parentrevs
        for r in getset(repo, fullreposet(repo), x):
            if r == node.wdirrev:
                # the working directory's parents come from context objects,
                # not from the changelog
                addall(p.rev() for p in repo[r].parents())
            else:
                addall(parentrevs(r))
    ps -= set([node.nullrev])
    return subset & ps
1517 1528
def _phase(repo, subset, target):
    """helper to select all rev in phase <target>"""
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    phasesets = repo._phasecache._phasesets
    if phasesets:
        # fast path: use the precomputed revision set for this phase
        revs = baseset(phasesets[target] - repo.changelog.filteredrevs)
        revs.sort() # set are non ordered, so we enforce ascending
        return subset & revs
    # slow path: query the phase of each candidate revision
    phase = repo._phasecache.phase
    return subset.filter(lambda r: phase(repo, r) == target, cache=False)
1530 1541
def draft(repo, subset, x):
    """``draft()``
    Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    # delegate to the shared single-phase selector
    target = phases.draft
    return _phase(repo, subset, target)
1538 1549
def secret(repo, subset, x):
    """``secret()``
    Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    # delegate to the shared single-phase selector
    target = phases.secret
    return _phase(repo, subset, target)
1546 1557
def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        # n is a parse-tree token; its payload must be 0, 1 or 2
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            # ^0 is the revision itself
            ps.add(r)
        elif n == 1:
            ps.add(cl.parentrevs(r)[0])
        elif n == 2:
            parents = cl.parentrevs(r)
            if len(parents) > 1:
                ps.add(parents[1])
    return subset & ps
1571 1582
def present(repo, subset, x):
    """``present(set)``
    An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        # deliberately swallow lookup errors: unknown revisions yield an
        # empty set instead of aborting the whole query
        return baseset()
1585 1596
# for internal use
def _notpublic(repo, subset, x):
    getargs(x, 0, 0, "_notpublic takes no arguments")
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    phasesets = repo._phasecache._phasesets
    if phasesets:
        # fast path: union of every non-public phase set
        revs = set()
        for phaseset in phasesets[1:]:
            revs |= phaseset
        revs = baseset(revs - repo.changelog.filteredrevs)
        revs.sort()
        return subset & revs
    # slow path: test the phase of each candidate revision
    phase = repo._phasecache.phase
    return subset.filter(lambda r: phase(repo, r) != phases.public,
                         cache=False)
1602 1613
def public(repo, subset, x):
    """``public()``
    Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    phase = repo._phasecache.phase
    target = phases.public
    condition = lambda r: phase(repo, r) == target
    # cache=False because a revision's phase may change during the session
    return subset.filter(condition, cache=False)
1612 1623
def remote(repo, subset, x):
    """``remote([id [,path]])``
    Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        # '.' names the current branch rather than the working parent
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # resolve the identifier on the remote, then map it back locally
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
1647 1658
def removes(repo, subset, x):
    """``removes(pattern)``
    Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _("removes requires a pattern"))
    # status index 2 selects the 'removed' field (see checkstatus)
    return checkstatus(repo, subset, pat, 2)
1659 1670
def rev(repo, subset, x):
    """``rev(number)``
    Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    args = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        r = int(getstring(args[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    # nullrev is valid even though it is never "in" the changelog
    if r not in repo.changelog and r != node.nullrev:
        return baseset()
    return subset & baseset([r])
1675 1686
1676 1687 def matching(repo, subset, x):
1677 1688 """``matching(revision [, field])``
1678 1689 Changesets in which a given set of fields match the set of fields in the
1679 1690 selected revision or set.
1680 1691
1681 1692 To match more than one field pass the list of fields to match separated
1682 1693 by spaces (e.g. ``author description``).
1683 1694
1684 1695 Valid fields are most regular revision fields and some special fields.
1685 1696
1686 1697 Regular revision fields are ``description``, ``author``, ``branch``,
1687 1698 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1688 1699 and ``diff``.
1689 1700 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1690 1701 contents of the revision. Two revisions matching their ``diff`` will
1691 1702 also match their ``files``.
1692 1703
1693 1704 Special fields are ``summary`` and ``metadata``:
1694 1705 ``summary`` matches the first line of the description.
1695 1706 ``metadata`` is equivalent to matching ``description user date``
1696 1707 (i.e. it matches the main metadata fields).
1697 1708
1698 1709 ``metadata`` is the default field which is used when no fields are
1699 1710 specified. You can match more than one field at a time.
1700 1711 """
1701 1712 # i18n: "matching" is a keyword
1702 1713 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1703 1714
1704 1715 revs = getset(repo, fullreposet(repo), l[0])
1705 1716
1706 1717 fieldlist = ['metadata']
1707 1718 if len(l) > 1:
1708 1719 fieldlist = getstring(l[1],
1709 1720 # i18n: "matching" is a keyword
1710 1721 _("matching requires a string "
1711 1722 "as its second argument")).split()
1712 1723
1713 1724 # Make sure that there are no repeated fields,
1714 1725 # expand the 'special' 'metadata' field type
1715 1726 # and check the 'files' whenever we check the 'diff'
1716 1727 fields = []
1717 1728 for field in fieldlist:
1718 1729 if field == 'metadata':
1719 1730 fields += ['user', 'description', 'date']
1720 1731 elif field == 'diff':
1721 1732 # a revision matching the diff must also match the files
1722 1733 # since matching the diff is very costly, make sure to
1723 1734 # also match the files first
1724 1735 fields += ['files', 'diff']
1725 1736 else:
1726 1737 if field == 'author':
1727 1738 field = 'user'
1728 1739 fields.append(field)
1729 1740 fields = set(fields)
1730 1741 if 'summary' in fields and 'description' in fields:
1731 1742 # If a revision matches its description it also matches its summary
1732 1743 fields.discard('summary')
1733 1744
1734 1745 # We may want to match more than one field
1735 1746 # Not all fields take the same amount of time to be matched
1736 1747 # Sort the selected fields in order of increasing matching cost
1737 1748 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1738 1749 'files', 'description', 'substate', 'diff']
1739 1750 def fieldkeyfunc(f):
1740 1751 try:
1741 1752 return fieldorder.index(f)
1742 1753 except ValueError:
1743 1754 # assume an unknown field is very costly
1744 1755 return len(fieldorder)
1745 1756 fields = list(fields)
1746 1757 fields.sort(key=fieldkeyfunc)
1747 1758
1748 1759 # Each field will be matched with its own "getfield" function
1749 1760 # which will be added to the getfieldfuncs array of functions
1750 1761 getfieldfuncs = []
1751 1762 _funcs = {
1752 1763 'user': lambda r: repo[r].user(),
1753 1764 'branch': lambda r: repo[r].branch(),
1754 1765 'date': lambda r: repo[r].date(),
1755 1766 'description': lambda r: repo[r].description(),
1756 1767 'files': lambda r: repo[r].files(),
1757 1768 'parents': lambda r: repo[r].parents(),
1758 1769 'phase': lambda r: repo[r].phase(),
1759 1770 'substate': lambda r: repo[r].substate,
1760 1771 'summary': lambda r: repo[r].description().splitlines()[0],
1761 1772 'diff': lambda r: list(repo[r].diff(git=True),)
1762 1773 }
1763 1774 for info in fields:
1764 1775 getfield = _funcs.get(info, None)
1765 1776 if getfield is None:
1766 1777 raise error.ParseError(
1767 1778 # i18n: "matching" is a keyword
1768 1779 _("unexpected field name passed to matching: %s") % info)
1769 1780 getfieldfuncs.append(getfield)
1770 1781 # convert the getfield array of functions into a "getinfo" function
1771 1782 # which returns an array of field values (or a single value if there
1772 1783 # is only one field to match)
1773 1784 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1774 1785
1775 1786 def matches(x):
1776 1787 for rev in revs:
1777 1788 target = getinfo(rev)
1778 1789 match = True
1779 1790 for n, f in enumerate(getfieldfuncs):
1780 1791 if target[n] != f(x):
1781 1792 match = False
1782 1793 if match:
1783 1794 return True
1784 1795 return False
1785 1796
1786 1797 return subset.filter(matches)
1787 1798
def reverse(repo, subset, x):
    """``reverse(set)``
    Reverse order of set.
    """
    # materialize the argument set, then flip its order in place
    revs = getset(repo, subset, x)
    revs.reverse()
    return revs
1795 1806
def roots(repo, subset, x):
    """``roots(set)``
    Changesets in set with no parent changeset in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    parentrevs = repo.changelog.parentrevs
    def isroot(rev):
        # a revision is a root when no real (non-null) parent is in the set
        return not any(p >= 0 and p in candidates
                       for p in parentrevs(rev))
    return subset & candidates.filter(isroot)
1808 1819
def sort(repo, subset, x):
    """``sort(set[, [-]key...])``
    Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    """
    # i18n: "sort" is a keyword
    l = getargs(x, 1, 2, _("sort requires one or two arguments"))
    keys = "rev"
    if len(l) == 2:
        # i18n: "sort" is a keyword
        keys = getstring(l[1], _("sort spec must be a string"))

    s = l[0]
    keys = keys.split()
    l = []
    def invert(s):
        # complement each byte so lexicographic order of the result is
        # the reverse of the order of the input string
        return "".join(chr(255 - ord(c)) for c in s)
    revs = getset(repo, subset, s)
    # fast paths: sorting by revision number needs no changectx at all
    if keys == ["rev"]:
        revs.sort()
        return revs
    elif keys == ["-rev"]:
        revs.sort(reverse=True)
        return revs
    for r in revs:
        c = repo[r]
        e = []
        for k in keys:
            if k == 'rev':
                e.append(r)
            elif k == '-rev':
                e.append(-r)
            elif k == 'branch':
                e.append(c.branch())
            elif k == '-branch':
                e.append(invert(c.branch()))
            elif k == 'desc':
                e.append(c.description())
            elif k == '-desc':
                e.append(invert(c.description()))
            # BUG FIX: this used to be "k in 'user author'", a substring
            # test, so invalid keys such as 'use' or 'r a' were silently
            # treated as a user sort instead of raising a parse error.
            elif k in ('user', 'author'):
                e.append(c.user())
            elif k in ('-user', '-author'):
                e.append(invert(c.user()))
            elif k == 'date':
                e.append(c.date()[0])
            elif k == '-date':
                e.append(-c.date()[0])
            else:
                raise error.ParseError(_("unknown sort key %r") % k)
        # append the revision number as final tie-breaker; it is also the
        # value extracted back out after sorting
        e.append(r)
        l.append(e)
    l.sort()
    return baseset([e[-1] for e in l])
1871 1882
def subrepo(repo, subset, x):
    """``subrepo([pattern])``
    Changesets that add, modify or remove the given subrepo. If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    # subrepo state is recorded in .hgsubstate, so only that file needs to
    # be compared between a revision and its first parent
    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        # note: 'pat' is only bound when an argument was given; this helper
        # is only reached on that code path (see matches() below)
        k, p, m = _stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        # status of .hgsubstate between the first parent and this revision
        s = repo.status(c.p1().node(), c.node(), match=m)

        if len(args) == 0:
            # no pattern: any change to .hgsubstate qualifies
            return s.added or s.modified or s.removed

        if s.added:
            # .hgsubstate appeared: every subrepo recorded in it is new
            return any(submatches(c.substate.keys()))

        if s.modified:
            # consider subrepos known on either side; one changed if its
            # entry differs between parent and child (incl. added/removed)
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            # .hgsubstate disappeared: every parent subrepo was removed
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches)
1914 1925
1915 1926 def _stringmatcher(pattern):
1916 1927 """
1917 1928 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1918 1929 returns the matcher name, pattern, and matcher function.
1919 1930 missing or unknown prefixes are treated as literal matches.
1920 1931
1921 1932 helper for tests:
1922 1933 >>> def test(pattern, *tests):
1923 1934 ... kind, pattern, matcher = _stringmatcher(pattern)
1924 1935 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1925 1936
1926 1937 exact matching (no prefix):
1927 1938 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1928 1939 ('literal', 'abcdefg', [False, False, True])
1929 1940
1930 1941 regex matching ('re:' prefix)
1931 1942 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1932 1943 ('re', 'a.+b', [False, False, True])
1933 1944
1934 1945 force exact matches ('literal:' prefix)
1935 1946 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1936 1947 ('literal', 're:foobar', [False, True])
1937 1948
1938 1949 unknown prefixes are ignored and treated as literals
1939 1950 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1940 1951 ('literal', 'foo:bar', [False, False, True])
1941 1952 """
1942 1953 if pattern.startswith('re:'):
1943 1954 pattern = pattern[3:]
1944 1955 try:
1945 1956 regex = re.compile(pattern)
1946 1957 except re.error as e:
1947 1958 raise error.ParseError(_('invalid regular expression: %s')
1948 1959 % e)
1949 1960 return 're', pattern, regex.search
1950 1961 elif pattern.startswith('literal:'):
1951 1962 pattern = pattern[8:]
1952 1963 return 'literal', pattern, pattern.__eq__
1953 1964
def _substringmatcher(pattern):
    # like _stringmatcher(), except that a 'literal' pattern matches as a
    # substring rather than requiring full-string equality
    kind, pattern, matcher = _stringmatcher(pattern)
    if kind == 'literal':
        def matcher(s, _needle=pattern):
            return _needle in s
    return kind, pattern, matcher
1959 1970
def tag(repo, subset, x):
    """``tag([name])``
    The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if not args:
        # no name given: every tagged revision, except the implicit 'tip'
        revs = set(cl.rev(n) for t, n in repo.tagslist() if t != 'tip')
    else:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = _stringmatcher(pattern)
        if kind != 'literal':
            revs = set(cl.rev(n) for t, n in repo.tagslist() if matcher(t))
        else:
            # avoid resolving all tags
            tagnode = repo._tagscache.tags.get(pattern, None)
            if tagnode is None:
                raise error.RepoLookupError(_("tag '%s' does not exist")
                                            % pattern)
            revs = set([repo[tagnode].rev()])
    return subset & revs
1988 1999
def tagged(repo, subset, x):
    # alias of tag() with the same arguments; see tag() above.  No
    # docstring on purpose, so only the comment documents the delegation.
    return tag(repo, subset, x)
1991 2002
def unstable(repo, subset, x):
    """``unstable()``
    Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    # restrict the subset to the revisions the obsolescence module
    # reports as unstable
    return subset & obsmod.getrevs(repo, 'unstable')
2000 2011
2001 2012
def user(repo, subset, x):
    """``user(string)``
    User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    # "user" and "author" are synonyms; author() holds the implementation
    return author(repo, subset, x)
2011 2022
# experimental
def wdir(repo, subset, x):
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    # yield the working-directory pseudo-revision only when the subset can
    # contain it (a fullreposet implicitly contains every revision)
    if isinstance(subset, fullreposet) or node.wdirrev in subset:
        return baseset([node.wdirrev])
    return baseset()
2019 2030
# for internal use
def _list(repo, subset, x):
    spec = getstring(x, "internal error")
    if not spec:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    revs = []
    seen = set()
    for symbol in spec.split('\0'):
        try:
            # fast path for integer revision
            rev = int(symbol)
            if str(rev) != symbol or rev not in cl:
                raise ValueError
        except ValueError:
            rev = repo[symbol].rev()
        if rev in seen:
            continue
        seen.add(rev)
        # nullrev is only kept when the subset is the full repository
        if (rev in subset
            or rev == node.nullrev and isinstance(subset, fullreposet)):
            revs.append(rev)
    return baseset(revs)
2045 2056
# for internal use
def _intlist(repo, subset, x):
    spec = getstring(x, "internal error")
    if not spec:
        return baseset()
    # keep the requested revisions that are present in the subset, in the
    # order they were listed
    wanted = [int(piece) for piece in spec.split('\0')]
    return baseset([rev for rev in wanted if rev in subset])
2054 2065
# for internal use
def _hexlist(repo, subset, x):
    spec = getstring(x, "internal error")
    if not spec:
        return baseset()
    # translate each binary node id to its revision number, then keep the
    # ones present in the subset, preserving the listed order
    torev = repo.changelog.rev
    wanted = [torev(node.bin(h)) for h in spec.split('\0')]
    return baseset([rev for rev in wanted if rev in subset])
2064 2075
# table of revset predicate functions, keyed by the symbol name used in a
# revset expression; names starting with "_" are internal-only helpers
# (see the "# for internal use" functions above)
symbols = {
    "adds": adds,
    "all": getall,
    "ancestor": ancestor,
    "ancestors": ancestors,
    "_firstancestors": _firstancestors,
    "author": author,
    "bisect": bisect,
    "bisected": bisected,
    "bookmark": bookmark,
    "branch": branch,
    "branchpoint": branchpoint,
    "bumped": bumped,
    "bundle": bundle,
    "children": children,
    "closed": closed,
    "contains": contains,
    "converted": converted,
    "date": date,
    "desc": desc,
    "descendants": descendants,
    "_firstdescendants": _firstdescendants,
    "destination": destination,
    "divergent": divergent,
    "draft": draft,
    "extinct": extinct,
    "extra": extra,
    "file": hasfile,
    "filelog": filelog,
    "first": first,
    "follow": follow,
    "_followfirst": _followfirst,
    "grep": grep,
    "head": head,
    "heads": heads,
    "hidden": hidden,
    "id": node_,
    "keyword": keyword,
    "last": last,
    "limit": limit,
    "_matchfiles": _matchfiles,
    "max": maxrev,
    "merge": merge,
    "min": minrev,
    "modifies": modifies,
    "named": named,
    "obsolete": obsolete,
    "only": only,
    "origin": origin,
    "outgoing": outgoing,
    "p1": p1,
    "p2": p2,
    "parents": parents,
    "present": present,
    "public": public,
    "_notpublic": _notpublic,
    "remote": remote,
    "removes": removes,
    "rev": rev,
    "reverse": reverse,
    "roots": roots,
    "sort": sort,
    "secret": secret,
    "subrepo": subrepo,
    "matching": matching,
    "tag": tag,
    "tagged": tagged,
    "user": user,
    "unstable": unstable,
    "wdir": wdir,
    "_list": _list,
    "_intlist": _intlist,
    "_hexlist": _hexlist,
}
2139 2150
# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
# NOTE: every entry must also exist in the 'symbols' table above.
# NOTE(review): 'author' and 'keyword' accept user-supplied patterns —
# confirm they really belong in this safe list.
safesymbols = set([
    "adds",
    "all",
    "ancestor",
    "ancestors",
    "_firstancestors",
    "author",
    "bisect",
    "bisected",
    "bookmark",
    "branch",
    "branchpoint",
    "bumped",
    "bundle",
    "children",
    "closed",
    "converted",
    "date",
    "desc",
    "descendants",
    "_firstdescendants",
    "destination",
    "divergent",
    "draft",
    "extinct",
    "extra",
    "file",
    "filelog",
    "first",
    "follow",
    "_followfirst",
    "head",
    "heads",
    "hidden",
    "id",
    "keyword",
    "last",
    "limit",
    "_matchfiles",
    "max",
    "merge",
    "min",
    "modifies",
    "obsolete",
    "only",
    "origin",
    "outgoing",
    "p1",
    "p2",
    "parents",
    "present",
    "public",
    "_notpublic",
    "remote",
    "removes",
    "rev",
    "reverse",
    "roots",
    "sort",
    "secret",
    "matching",
    "tag",
    "tagged",
    "user",
    "unstable",
    "wdir",
    "_list",
    "_intlist",
    "_hexlist",
    ])
2213 2224
# maps each parse-tree node type to the function that evaluates it;
# function-call nodes ("func") dispatch further through the 'symbols' table
methods = {
    "range": rangeset,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "or": orset,
    "not": notset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": p1,
}
2229 2240
def optimize(x, small):
    """Optimize parse tree ``x`` and return a (weight, tree) pair.

    The weight is a rough estimate of how expensive the tree is to
    evaluate; it is used to evaluate the cheaper operand of an 'and'
    first.  ``small`` hints that the caller expects a small result set.
    """
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    if op == 'minus':
        # 'a - b' is rewritten as 'a and not b'
        return optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'only':
        return optimize(('func', ('symbol', 'only'),
                         ('list', x[1], x[2])), small)
    elif op == 'onlypost':
        return optimize(('func', ('symbol', 'only'), x[1]), small)
    elif op == 'dagrangepre':
        # '::x' is 'ancestors(x)'
        return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        # 'x::' is 'descendants(x)'
        return optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangeall':
        return optimize(('range', ('string', '0'), ('string', 'tip')), small)
    elif op == 'rangepre':
        return optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        return optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        return optimize(('string',
                         '-' + getstring(x[1], _("can't negate that"))), small)
    elif op in 'string symbol negate':
        # NOTE(review): substring membership test; relies on 'op' always
        # being one of the full words produced by the parser
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = optimize(x[1], True)
        wb, tb = optimize(x[2], True)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
        def isonly(revs, bases):
            return (
                revs[0] == 'func'
                and getstring(revs[1], _('not a symbol')) == 'ancestors'
                and bases[0] == 'not'
                and bases[1][0] == 'func'
                and getstring(bases[1][1], _('not a symbol')) == 'ancestors')

        w = min(wa, wb)
        if isonly(ta, tb):
            return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
        if isonly(tb, ta):
            return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))

        # evaluate the cheaper operand first
        if wa > wb:
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        # fast path for machine-generated expression, that is likely to have
        # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
        ws, ts, ss = [], [], []
        def flushss():
            # fold the pending run of trivial operands into one _list() call
            if not ss:
                return
            if len(ss) == 1:
                w, t = ss[0]
            else:
                s = '\0'.join(t[1] for w, t in ss)
                y = ('func', ('symbol', '_list'), ('string', s))
                w, t = optimize(y, False)
            ws.append(w)
            ts.append(t)
            del ss[:]
        for y in x[1:]:
            w, t = optimize(y, False)
            if t[0] == 'string' or t[0] == 'symbol':
                ss.append((w, t))
                continue
            flushss()
            ws.append(w)
            ts.append(t)
        flushss()
        if len(ts) == 1:
            return ws[0], ts[0] # 'or' operation is fully optimized out
        # we can't reorder trees by weight because it would change the order.
        # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
        # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
        return max(ws), (op,) + tuple(ts)
    elif op == 'not':
        # Optimize not public() to _notpublic() because we have a fast version
        if x[1] == ('func', ('symbol', 'public'), None):
            newsym = ('func', ('symbol', '_notpublic'), None)
            o = optimize(newsym, not small)
            return o[0], o[1]
        else:
            o = optimize(x[1], not small)
            return o[0], (op, o[1])
    elif op == 'parentpost':
        o = optimize(x[1], small)
        return o[0], (op, o[1])
    elif op == 'group':
        # parentheses only group; they carry no semantics of their own
        return optimize(x[1], small)
    elif op in 'dagrange range list parent ancestorspec':
        if op == 'parent':
            # x^:y means (x^) : y, not x ^ (:y)
            post = ('parentpost', x[1])
            if x[2][0] == 'dagrangepre':
                return optimize(('dagrange', post, x[2][1]), small)
            elif x[2][0] == 'rangepre':
                return optimize(('range', post, x[2][1]), small)

        wa, ta = optimize(x[1], small)
        wb, tb = optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'func':
        f = getstring(x[1], _("not a symbol"))
        wa, ta = optimize(x[2], small)
        # NOTE(review): the weight buckets below also use substring
        # membership on space-separated words; harmless for weighting, but
        # e.g. 'or' would match inside "author"
        if f in ("author branch closed date desc file grep keyword "
                 "outgoing user"):
            w = 10 # slow
        elif f in "modifies adds removes":
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in "reverse limit first _intlist":
            w = 0
        elif f in "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        return w + wa, (op, x[1], ta)
    return 1, x
2360 2371
2361 2372 _aliasarg = ('func', ('symbol', '_aliasarg'))
2362 2373 def _getaliasarg(tree):
2363 2374 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
2364 2375 return X, None otherwise.
2365 2376 """
2366 2377 if (len(tree) == 3 and tree[:2] == _aliasarg
2367 2378 and tree[2][0] == 'string'):
2368 2379 return tree[2][1]
2369 2380 return None
2370 2381
def _checkaliasarg(tree, known=None):
    """Check tree contains no _aliasarg construct or only ones which
    value is in known. Used to avoid alias placeholders injection.
    """
    if not isinstance(tree, tuple):
        return
    argname = _getaliasarg(tree)
    if argname is not None and (not known or argname not in known):
        raise error.UnknownIdentifier('_aliasarg', [])
    for subtree in tree:
        _checkaliasarg(subtree, known)
2381 2392
# the set of valid characters for the initial letter of symbols in
# alias declarations and definitions
# ('$' is accepted so that $1-style argument placeholders tokenize as
# symbols; see _tokenizealias below)
_aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
                           if c.isalnum() or c in '._@$' or ord(c) > 127)
2386 2397
def _tokenizealias(program, lookup=None):
    """Parse alias declaration/definition into a stream of tokens

    This allows symbol names to use also ``$`` as an initial letter
    (for backward compatibility), and callers of this function should
    examine whether ``$`` is used also for unexpected symbols or not.
    """
    # delegate to the regular tokenizer with a widened alphabet for the
    # first letter of a symbol
    return tokenize(program, lookup=lookup,
                    syminitletters=_aliassyminitletters)
2396 2407
def _parsealiasdecl(decl):
    """Parse alias declaration ``decl``

    This returns ``(name, tree, args, errorstr)`` tuple:

    - ``name``: of declared alias (may be ``decl`` itself at error)
    - ``tree``: parse result (or ``None`` at error)
    - ``args``: list of alias argument names (or None for symbol declaration)
    - ``errorstr``: detail about detected error (or None)

    >>> _parsealiasdecl('foo')
    ('foo', ('symbol', 'foo'), None, None)
    >>> _parsealiasdecl('$foo')
    ('$foo', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo::bar')
    ('foo::bar', None, None, 'invalid format')
    >>> _parsealiasdecl('foo bar')
    ('foo bar', None, None, 'at 4: invalid token')
    >>> _parsealiasdecl('foo()')
    ('foo', ('func', ('symbol', 'foo')), [], None)
    >>> _parsealiasdecl('$foo()')
    ('$foo()', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo($1, $2)')
    ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
    >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
    ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
    >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
    ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo(bar($1, $2))')
    ('foo(bar($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo("string")')
    ('foo("string")', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo($1, $2')
    ('foo($1, $2', None, None, 'at 10: unexpected token: end')
    >>> _parsealiasdecl('foo("string')
    ('foo("string', None, None, 'at 5: unterminated string')
    >>> _parsealiasdecl('foo($1, $2, $1)')
    ('foo', None, None, 'argument names collide with each other')
    """
    p = parser.parser(elements)
    try:
        tree, pos = p.parse(_tokenizealias(decl))
        # the whole declaration must have been consumed
        if (pos != len(decl)):
            raise error.ParseError(_('invalid token'), pos)

        if isvalidsymbol(tree):
            # "name = ...." style
            name = getsymbol(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            return (name, ('symbol', name), None, None)

        if isvalidfunc(tree):
            # "name(arg, ....) = ...." style
            name = getfuncname(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            args = []
            # every argument must be a plain symbol (no nesting, no strings)
            for arg in getfuncargs(tree):
                if not isvalidsymbol(arg):
                    return (decl, None, None, _("invalid argument list"))
                args.append(getsymbol(arg))
            # argument names must be distinct
            if len(args) != len(set(args)):
                return (name, None, None,
                        _("argument names collide with each other"))
            return (name, ('func', ('symbol', name)), args, None)

        return (decl, None, None, _("invalid format"))
    except error.ParseError as inst:
        return (decl, None, None, parseerrordetail(inst))
2467 2478
def _parsealiasdefn(defn, args):
    """Parse alias definition ``defn``

    This function also replaces alias argument references in the
    specified definition by ``_aliasarg(ARGNAME)``.

    ``args`` is a list of alias argument names, or None if the alias
    is declared as a symbol.

    This returns "tree" as parsing result.

    >>> args = ['$1', '$2', 'foo']
    >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$1'))
      (func
        ('symbol', '_aliasarg')
        ('string', 'foo')))
    >>> try:
    ...     _parsealiasdefn('$1 or $bar', args)
    ... except error.ParseError, inst:
    ...     print parseerrordetail(inst)
    at 6: '$' not for alias arguments
    >>> args = ['$1', '$10', 'foo']
    >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$10'))
      ('symbol', 'foobar'))
    >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
    (or
      ('string', '$1')
      ('string', 'foo'))
    """
    def tokenizedefn(program, lookup=None):
        if args:
            argset = set(args)
        else:
            argset = set()

        for t, value, pos in _tokenizealias(program, lookup=lookup):
            if t == 'symbol':
                if value in argset:
                    # emulate tokenization of "_aliasarg('ARGNAME')":
                    # "_aliasarg()" is an unknown symbol only used to
                    # separate alias argument placeholders from regular
                    # strings.
                    yield ('symbol', '_aliasarg', pos)
                    yield ('(', None, pos)
                    yield ('string', value, pos)
                    yield (')', None, pos)
                    continue
                elif value.startswith('$'):
                    # '$'-symbols other than declared argument names are
                    # rejected to avoid accidental placeholder injection
                    raise error.ParseError(_("'$' not for alias arguments"),
                                           pos)
            yield (t, value, pos)

    # parse with the wrapping tokenizer so argument references come out as
    # _aliasarg(...) placeholder nodes
    p = parser.parser(elements)
    tree, pos = p.parse(tokenizedefn(defn))
    if pos != len(defn):
        raise error.ParseError(_('invalid token'), pos)
    return parser.simplifyinfixops(tree, ('or',))
2532 2543
class revsetalias(object):
    # whether own `error` information is already shown or not.
    # this avoids showing same warning multiple times at each `findaliases`.
    warned = False

    def __init__(self, name, value):
        '''Aliases like:

        h = heads(default)
        b($1) = ancestors($1) - ancestors(default)
        '''
        # name/tree/args mirror _parsealiasdecl(); error holds a message
        # when either the declaration or the definition failed to parse
        self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
        if self.error:
            self.error = _('failed to parse the declaration of revset alias'
                           ' "%s": %s') % (self.name, self.error)
            # note: self.replacement is left unset on this path
            return

        try:
            self.replacement = _parsealiasdefn(value, self.args)
            # Check for placeholder injection
            _checkaliasarg(self.replacement, self.args)
        except error.ParseError as inst:
            self.error = _('failed to parse the definition of revset alias'
                           ' "%s": %s') % (self.name, parseerrordetail(inst))
2557 2568
2558 2569 def _getalias(aliases, tree):
2559 2570 """If tree looks like an unexpanded alias, return it. Return None
2560 2571 otherwise.
2561 2572 """
2562 2573 if isinstance(tree, tuple) and tree:
2563 2574 if tree[0] == 'symbol' and len(tree) == 2:
2564 2575 name = tree[1]
2565 2576 alias = aliases.get(name)
2566 2577 if alias and alias.args is None and alias.tree == tree:
2567 2578 return alias
2568 2579 if tree[0] == 'func' and len(tree) > 1:
2569 2580 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2570 2581 name = tree[1][1]
2571 2582 alias = aliases.get(name)
2572 2583 if alias and alias.args is not None and alias.tree == tree[:2]:
2573 2584 return alias
2574 2585 return None
2575 2586
def _expandargs(tree, args):
    """Replace _aliasarg instances with the substitution value of the
    same name in args, recursively.
    """
    if not (tree and isinstance(tree, tuple)):
        # leaves (strings, None, ...) pass through unchanged
        return tree
    argname = _getaliasarg(tree)
    if argname is None:
        return tuple(_expandargs(subtree, args) for subtree in tree)
    return args[argname]
2586 2597
def _expandaliases(aliases, tree, expanding, cache):
    """Expand aliases in tree, recursively.

    'aliases' is a dictionary mapping user defined aliases to
    revsetalias objects.

    'expanding' is the stack of aliases currently being expanded (used to
    detect cycles); 'cache' memoizes fully-expanded alias bodies by name.
    """
    if not isinstance(tree, tuple):
        # Do not expand raw strings
        return tree
    alias = _getalias(aliases, tree)
    if alias is not None:
        if alias.error:
            raise util.Abort(alias.error)
        if alias in expanding:
            raise error.ParseError(_('infinite expansion of revset alias "%s" '
                                     'detected') % alias.name)
        expanding.append(alias)
        if alias.name not in cache:
            cache[alias.name] = _expandaliases(aliases, alias.replacement,
                                               expanding, cache)
        result = cache[alias.name]
        expanding.pop()
        if alias.args is not None:
            l = getlist(tree[2])
            if len(l) != len(alias.args):
                raise error.ParseError(
                    _('invalid number of arguments: %s') % len(l))
            # actual arguments are expanded with a fresh 'expanding' stack:
            # they are independent of the alias body being substituted into
            l = [_expandaliases(aliases, a, [], cache) for a in l]
            result = _expandargs(result, dict(zip(alias.args, l)))
    else:
        # not an alias application: recurse into the children
        result = tuple(_expandaliases(aliases, t, expanding, cache)
                       for t in tree)
    return result
2620 2631
def findaliases(ui, tree, showwarning=None):
    """Expand configured [revsetalias] aliases inside the given tree."""
    # reject trees that try to smuggle in alias-argument placeholders
    _checkaliasarg(tree)
    aliases = {}
    for declaration, definition in ui.configitems('revsetalias'):
        alias = revsetalias(declaration, definition)
        aliases[alias.name] = alias
    expanded = _expandaliases(aliases, tree, [], {})
    if showwarning:
        # warn about problematic (but not referred) aliases
        for name, alias in sorted(aliases.iteritems()):
            if alias.error and not alias.warned:
                showwarning(_('warning: %s\n') % (alias.error))
                alias.warned = True
    return expanded
2635 2646
def foldconcat(tree):
    """Fold elements to be concatenated by `##`
    """
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return tree
    if tree[0] != '_concat':
        return tuple(foldconcat(t) for t in tree)
    # flatten arbitrarily nested '_concat' nodes into one string,
    # depth-first and left-to-right
    pieces = []
    stack = [tree]
    while stack:
        elem = stack.pop()
        kind = elem[0]
        if kind == '_concat':
            stack.extend(reversed(elem[1:]))
        elif kind in ('string', 'symbol'):
            pieces.append(elem[1])
        else:
            raise error.ParseError(
                _("\"##\" can't concatenate \"%s\" element") % (kind,))
    return ('string', ''.join(pieces))
2656 2667
def parse(spec, lookup=None):
    """Parse a revset specification into a simplified parse tree.

    'lookup' is an optional callable passed to the tokenizer to decide
    whether an ambiguous token names an existing revision.  Raises
    error.ParseError if 'spec' is not fully consumed.
    """
    p = parser.parser(elements)
    tree, pos = p.parse(tokenize(spec, lookup=lookup))
    if pos != len(spec):
        raise error.ParseError(_("invalid token"), pos)
    return parser.simplifyinfixops(tree, ('or',))
2663 2674
def posttreebuilthook(tree, repo):
    """No-op hook invoked after the optimized parse tree is built.

    Extensions wrap this to inspect or act on the tree before it is
    evaluated.
    """
    # hook for extensions to execute code on the optimized tree
    pass
2667 2678
def match(ui, spec, repo=None):
    """Return a matcher function for the single revset 'spec'."""
    if not spec:
        raise error.ParseError(_("empty query"))
    lookup = repo.__contains__ if repo else None
    tree = parse(spec, lookup)
    return _makematcher(ui, tree, repo)
2676 2687
def matchany(ui, specs, repo=None):
    """Create a matcher that will include any revisions matching one of the
    given specs"""
    if not specs:
        # no specs at all: a matcher that always yields the empty set
        def mfunc(repo, subset=None):
            return baseset()
        return mfunc
    if not all(specs):
        raise error.ParseError(_("empty query"))
    lookup = repo.__contains__ if repo else None
    parsed = [parse(s, lookup) for s in specs]
    if len(parsed) == 1:
        tree = parsed[0]
    else:
        tree = ('or',) + tuple(parsed)
    return _makematcher(ui, tree, repo)
2694 2705
def _makematcher(ui, tree, repo):
    """Turn a parse tree into a matcher function.

    The returned mfunc(repo, subset=None) evaluates the tree against
    'subset' (defaulting to the full repo) and returns a smartset.
    """
    if ui:
        tree = findaliases(ui, tree, showwarning=ui.warn)
    tree = foldconcat(tree)
    weight, tree = optimize(tree, True)
    posttreebuilthook(tree, repo)
    def mfunc(repo, subset=None):
        if subset is None:
            subset = fullreposet(repo)
        # plain iterables are wrapped so getset always sees a smartset
        if util.safehasattr(subset, 'isascending'):
            result = getset(repo, subset, tree)
        else:
            result = getset(repo, baseset(subset), tree)
        return result
    return mfunc
2710 2721
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    "_list('') or 1"
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    '''

    def quote(s):
        # repr() of a str yields a single-quoted, escaped literal
        return repr(str(s))

    def argtype(c, arg):
        if c == 'd':
            return str(int(arg))
        if c == 's':
            return quote(arg)
        if c == 'r':
            parse(arg) # make sure syntax errors are confined
            return '(%s)' % arg
        if c == 'n':
            return quote(node.hex(arg))
        if c == 'b':
            return quote(arg.branch())

    def listexp(s, t):
        # render a list as a single _list()/_intlist()/_hexlist() call
        # where possible, otherwise fall back to an 'or' tree
        l = len(s)
        if l == 0:
            return "_list('')"
        if l == 1:
            return argtype(t, s[0])
        if t == 'd':
            return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
        if t == 's':
            return "_list('%s')" % "\0".join(s)
        if t == 'n':
            return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
        if t == 'b':
            return "_list('%s')" % "\0".join(a.branch() for a in s)

        m = l // 2
        return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))

    chunks = []
    pos = 0
    argindex = 0
    end = len(expr)
    while pos < end:
        ch = expr[pos]
        if ch != '%':
            chunks.append(ch)
            pos += 1
            continue
        pos += 1
        code = expr[pos]
        if code == '%':
            chunks.append(code)
        elif code == 'l':
            # a list of some type: the next character is the element type
            pos += 1
            code = expr[pos]
            chunks.append(listexp(list(args[argindex]), code))
            argindex += 1
        elif code in 'dsnbr':
            chunks.append(argtype(code, args[argindex]))
            argindex += 1
        else:
            raise util.Abort('unexpected revspec format character %s' % code)
        pos += 1

    return ''.join(chunks)
2804 2815
def prettyformat(tree):
    """Return a multi-line, indented rendering of a revset parse tree."""
    return parser.prettyformat(tree, ('string', 'symbol'))
2807 2818
def depth(tree):
    """Return the nesting depth of a parse tree; non-tuple leaves are 0."""
    if not isinstance(tree, tuple):
        return 0
    return 1 + max(depth(subtree) for subtree in tree)
2813 2824
def funcsused(tree):
    """Return the set of function names referenced by a parse tree."""
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return set()
    funcs = set()
    for subtree in tree[1:]:
        funcs.update(funcsused(subtree))
    if tree[0] == 'func':
        # the function name is the payload of the first child node
        funcs.add(tree[1][1])
    return funcs
2824 2835
class abstractsmartset(object):
    """Base class defining the interface shared by all smartset classes.

    A smartset is an ordered collection of revision numbers supporting
    lazy iteration, fast membership testing, and set arithmetic.
    """

    def __nonzero__(self):
        """True if the smartset is not empty"""
        raise NotImplementedError()

    def __contains__(self, rev):
        """provide fast membership testing"""
        raise NotImplementedError()

    def __iter__(self):
        """iterate the set in the order it is supposed to be iterated"""
        raise NotImplementedError()

    # Attributes containing a function to perform a fast iteration in a given
    # direction. A smartset can have none, one, or both defined.
    #
    # Default value is None instead of a function returning None to avoid
    # initializing an iterator just for testing if a fast method exists.
    fastasc = None
    fastdesc = None

    def isascending(self):
        """True if the set will iterate in ascending order"""
        raise NotImplementedError()

    def isdescending(self):
        """True if the set will iterate in descending order"""
        raise NotImplementedError()

    def min(self):
        """return the minimum element in the set"""
        if self.fastasc is not None:
            # first value of an ascending iteration is the minimum
            for r in self.fastasc():
                return r
            raise ValueError('arg is an empty sequence')
        return min(self)

    def max(self):
        """return the maximum element in the set"""
        if self.fastdesc is not None:
            # first value of a descending iteration is the maximum
            for r in self.fastdesc():
                return r
            raise ValueError('arg is an empty sequence')
        return max(self)

    def first(self):
        """return the first element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def last(self):
        """return the last element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def __len__(self):
        """return the length of the smartsets

        This can be expensive on smartset that could be lazy otherwise."""
        raise NotImplementedError()

    def reverse(self):
        """reverse the expected iteration order"""
        raise NotImplementedError()

    def sort(self, reverse=True):
        """get the set to iterate in an ascending or descending order"""
        raise NotImplementedError()

    def __and__(self, other):
        """Returns a new object with the intersection of the two collections.

        This is part of the mandatory API for smartset."""
        if isinstance(other, fullreposet):
            # intersecting with the whole repo changes nothing
            return self
        return self.filter(other.__contains__, cache=False)

    def __add__(self, other):
        """Returns a new object with the union of the two collections.

        This is part of the mandatory API for smartset."""
        return addset(self, other)

    def __sub__(self, other):
        """Returns a new object with the substraction of the two collections.

        This is part of the mandatory API for smartset."""
        c = other.__contains__
        return self.filter(lambda r: not c(r), cache=False)

    def filter(self, condition, cache=True):
        """Returns this smartset filtered by condition as a new smartset.

        `condition` is a callable which takes a revision number and returns a
        boolean.

        This is part of the mandatory API for smartset."""
        # builtin cannot be cached. but do not needs to
        if cache and util.safehasattr(condition, 'func_code'):
            condition = util.cachefunc(condition)
        return filteredset(self, condition)
2929 2940
class baseset(abstractsmartset):
    """Basic data structure that represents a revset and contains the basic
    operation that it should be able to perform.

    Every method in this class should be implemented by any smartset class.
    """
    def __init__(self, data=()):
        # store the data as a plain list; a membership set and a sorted
        # copy are built lazily on first use
        self._list = data if isinstance(data, list) else list(data)
        self._ascending = None

    @util.propertycache
    def _set(self):
        return set(self._list)

    @util.propertycache
    def _asclist(self):
        return sorted(self._list)

    def __iter__(self):
        if self._ascending is None:
            return iter(self._list)
        if self._ascending:
            return iter(self._asclist)
        return reversed(self._asclist)

    def fastasc(self):
        return iter(self._asclist)

    def fastdesc(self):
        return reversed(self._asclist)

    @util.propertycache
    def __contains__(self):
        # bind directly to the set's membership test for speed
        return self._set.__contains__

    def __nonzero__(self):
        return bool(self._list)

    def sort(self, reverse=False):
        self._ascending = not bool(reverse)

    def reverse(self):
        if self._ascending is None:
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def __len__(self):
        return len(self._list)

    def isascending(self):
        """Returns True if the collection is ascending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and self._ascending

    def isdescending(self):
        """Returns True if the collection is descending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and not self._ascending

    def first(self):
        if not self:
            return None
        if self._ascending is None:
            return self._list[0]
        if self._ascending:
            return self._asclist[0]
        return self._asclist[-1]

    def last(self):
        if not self:
            return None
        if self._ascending is None:
            return self._list[-1]
        if self._ascending:
            return self._asclist[-1]
        return self._asclist[0]

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r>' % (type(self).__name__, d, self._list)
3024 3035
class filteredset(abstractsmartset):
    """Duck type for baseset class which iterates lazily over the revisions in
    the subset and contains a function which tests for membership in the
    revset
    """
    def __init__(self, subset, condition=lambda x: True):
        """
        condition: a function that decide whether a revision in the subset
        belongs to the revset or not.
        """
        self._subset = subset
        self._condition = condition
        # memoizes condition results per revision
        self._cache = {}

    def __contains__(self, x):
        c = self._cache
        if x not in c:
            # a rev belongs iff it is in the subset and passes the condition
            v = c[x] = x in self._subset and self._condition(x)
            return v
        return c[x]

    def __iter__(self):
        return self._iterfilter(self._subset)

    def _iterfilter(self, it):
        # yield only the elements of 'it' accepted by the condition
        cond = self._condition
        for x in it:
            if cond(x):
                yield x

    @property
    def fastasc(self):
        # fast iteration is only available if the underlying subset has it
        it = self._subset.fastasc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    @property
    def fastdesc(self):
        it = self._subset.fastdesc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    def __nonzero__(self):
        for r in self:
            return True
        return False

    def __len__(self):
        # Basic implementation to be changed in future patches.
        l = baseset([r for r in self])
        return len(l)

    def sort(self, reverse=False):
        self._subset.sort(reverse=reverse)

    def reverse(self):
        self._subset.reverse()

    def isascending(self):
        return self._subset.isascending()

    def isdescending(self):
        return self._subset.isdescending()

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        it = None
        # prefer iterating from the opposite end so we can stop at the
        # first match instead of scanning the whole set
        if self.isascending():
            it = self.fastdesc
        elif self.isdescending():
            it = self.fastasc
        if it is not None:
            for x in it():
                return x
            return None #empty case
        else:
            # no fast reversed iteration: exhaust and keep the last value
            x = None
            for x in self:
                pass
            return x

    def __repr__(self):
        return '<%s %r>' % (type(self).__name__, self._subset)
3114 3125
3115 3126 def _iterordered(ascending, iter1, iter2):
3116 3127 """produce an ordered iteration from two iterators with the same order
3117 3128
3118 3129 The ascending is used to indicated the iteration direction.
3119 3130 """
3120 3131 choice = max
3121 3132 if ascending:
3122 3133 choice = min
3123 3134
3124 3135 val1 = None
3125 3136 val2 = None
3126 3137 try:
3127 3138 # Consume both iterators in an ordered way until one is empty
3128 3139 while True:
3129 3140 if val1 is None:
3130 3141 val1 = iter1.next()
3131 3142 if val2 is None:
3132 3143 val2 = iter2.next()
3133 3144 next = choice(val1, val2)
3134 3145 yield next
3135 3146 if val1 == next:
3136 3147 val1 = None
3137 3148 if val2 == next:
3138 3149 val2 = None
3139 3150 except StopIteration:
3140 3151 # Flush any remaining values and consume the other one
3141 3152 it = iter2
3142 3153 if val1 is not None:
3143 3154 yield val1
3144 3155 it = iter1
3145 3156 elif val2 is not None:
3146 3157 # might have been equality and both are empty
3147 3158 yield val2
3148 3159 for val in it:
3149 3160 yield val
3150 3161
class addset(abstractsmartset):
    """Represent the addition of two sets

    Wrapper structure for lazily adding two structures without losing much
    performance on the __contains__ method

    If the ascending attribute is set, that means the two structures are
    ordered in either an ascending or descending way. Therefore, we can add
    them maintaining the order by iterating over both at the same time

    >>> xs = baseset([0, 3, 2])
    >>> ys = baseset([5, 2, 4])

    >>> rs = addset(xs, ys)
    >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
    (True, True, False, True, 0, 4)
    >>> rs = addset(xs, baseset([]))
    >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
    (True, True, False, 0, 2)
    >>> rs = addset(baseset([]), baseset([]))
    >>> bool(rs), 0 in rs, rs.first(), rs.last()
    (False, False, None, None)

    iterate unsorted:
    >>> rs = addset(xs, ys)
    >>> [x for x in rs] # without _genlist
    [0, 3, 2, 5, 4]
    >>> assert not rs._genlist
    >>> len(rs)
    5
    >>> [x for x in rs] # with _genlist
    [0, 3, 2, 5, 4]
    >>> assert rs._genlist

    iterate ascending:
    >>> rs = addset(xs, ys, ascending=True)
    >>> [x for x in rs], [x for x in rs.fastasc()] # without _asclist
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastasc()]
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert rs._asclist

    iterate descending:
    >>> rs = addset(xs, ys, ascending=False)
    >>> [x for x in rs], [x for x in rs.fastdesc()] # without _asclist
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastdesc()]
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert rs._asclist

    iterate ascending without fastasc:
    >>> rs = addset(xs, generatorset(ys), ascending=True)
    >>> assert rs.fastasc is None
    >>> [x for x in rs]
    [0, 2, 3, 4, 5]

    iterate descending without fastdesc:
    >>> rs = addset(generatorset(xs), ys, ascending=False)
    >>> assert rs.fastdesc is None
    >>> [x for x in rs]
    [5, 4, 3, 2, 0]
    """
    def __init__(self, revs1, revs2, ascending=None):
        self._r1 = revs1
        self._r2 = revs2
        self._iter = None
        self._ascending = ascending
        # _genlist caches all generated values; _asclist is its sorted copy
        self._genlist = None
        self._asclist = None

    def __len__(self):
        return len(self._list)

    def __nonzero__(self):
        return bool(self._r1) or bool(self._r2)

    @util.propertycache
    def _list(self):
        if not self._genlist:
            self._genlist = baseset(iter(self))
        return self._genlist

    def __iter__(self):
        """Iterate over both collections without repeating elements

        If the ascending attribute is not set, iterate over the first one and
        then over the second one checking for membership on the first one so we
        dont yield any duplicates.

        If the ascending attribute is set, iterate over both collections at the
        same time, yielding only one value at a time in the given order.
        """
        if self._ascending is None:
            if self._genlist:
                return iter(self._genlist)
            def arbitraryordergen():
                for r in self._r1:
                    yield r
                inr1 = self._r1.__contains__
                for r in self._r2:
                    if not inr1(r):
                        yield r
            return arbitraryordergen()
        # try to use our own fast iterator if it exists
        self._trysetasclist()
        if self._ascending:
            attr = 'fastasc'
        else:
            attr = 'fastdesc'
        it = getattr(self, attr)
        if it is not None:
            return it()
        # maybe half of the component supports fast
        # get iterator for _r1
        iter1 = getattr(self._r1, attr)
        if iter1 is None:
            # let's avoid side effect (not sure it matters)
            iter1 = iter(sorted(self._r1, reverse=not self._ascending))
        else:
            iter1 = iter1()
        # get iterator for _r2
        iter2 = getattr(self._r2, attr)
        if iter2 is None:
            # let's avoid side effect (not sure it matters)
            iter2 = iter(sorted(self._r2, reverse=not self._ascending))
        else:
            iter2 = iter2()
        return _iterordered(self._ascending, iter1, iter2)

    def _trysetasclist(self):
        """populate the _asclist attribute if possible and necessary"""
        if self._genlist is not None and self._asclist is None:
            self._asclist = sorted(self._genlist)

    @property
    def fastasc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__iter__
        # fast merge is only possible when both components support it
        iter1 = self._r1.fastasc
        iter2 = self._r2.fastasc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(True, iter1(), iter2())

    @property
    def fastdesc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__reversed__
        iter1 = self._r1.fastdesc
        iter2 = self._r2.fastdesc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(False, iter1(), iter2())

    def __contains__(self, x):
        return x in self._r1 or x in self._r2

    def sort(self, reverse=False):
        """Sort the added set

        For this we use the cached list with all the generated values and if we
        know they are ascending or descending we can sort them in a smart way.
        """
        self._ascending = not reverse

    def isascending(self):
        return self._ascending is not None and self._ascending

    def isdescending(self):
        return self._ascending is not None and not self._ascending

    def reverse(self):
        if self._ascending is None:
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        # reuse first() on the reversed order, then restore the order
        self.reverse()
        val = self.first()
        self.reverse()
        return val

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3350 3361
class generatorset(abstractsmartset):
    """Wrap a generator for lazy iteration

    Wrapper structure for generators that provides lazy membership and can
    be iterated more than once.
    When asked for membership it generates values until either it finds the
    requested one or has gone through all the elements in the generator
    """
    def __init__(self, gen, iterasc=None):
        """
        gen: a generator producing the values for the generatorset.

        iterasc: if not None, declares that 'gen' yields values in
        ascending (True) or descending (False) order, enabling fast
        iteration and early-exit membership tests in that direction.
        """
        self._gen = gen
        self._asclist = None
        self._cache = {}
        self._genlist = []
        self._finished = False
        self._ascending = True
        if iterasc is not None:
            if iterasc:
                self.fastasc = self._iterator
                self.__contains__ = self._asccontains
            else:
                self.fastdesc = self._iterator
                self.__contains__ = self._desccontains

    def __nonzero__(self):
        # Do not use 'for r in self' because it will enforce the iteration
        # order (default ascending), possibly unrolling a whole descending
        # iterator.
        if self._genlist:
            return True
        for r in self._consumegen():
            return True
        return False

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def _asccontains(self, x):
        """version of contains optimised for ascending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l > x:
                # ascending order: once past x, it cannot appear anymore
                break

        self._cache[x] = False
        return False

    def _desccontains(self, x):
        """version of contains optimised for descending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l < x:
                # descending order: once below x, it cannot appear anymore
                break

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator
        for x in self._consumegen():
            pass
        # recall the same code
        return iter(self)

    def _iterator(self):
        if self._finished:
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revision
        # removed from _consumegen and added to genlist in another instance.
        #
        # Getting rid of it would provide an about 15% speed up on this
        # iteration.
        genlist = self._genlist
        nextrev = self._consumegen().next
        _len = len # cache global lookup
        def gen():
            i = 0
            while True:
                if i < _len(genlist):
                    yield genlist[i]
                else:
                    yield nextrev()
                i += 1
        return gen()

    def _consumegen(self):
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        if not self._finished:
            # generator exhausted: record the sorted results so both fast
            # iteration directions become available from now on
            self._finished = True
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__

    def __len__(self):
        for x in self._consumegen():
            pass
        return len(self._genlist)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        return next(it(), None)

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            # BUGFIX: recurse into last(), not first() -- the previous
            # fallback returned the first element instead of the last
            return self.last()
        return next(it(), None)

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s>' % (type(self).__name__, d)
3524 3535
class spanset(abstractsmartset):
    """Duck type for baseset class which represents a range of revisions and
    can work lazily and without having all the range in memory

    Note that spanset(x, y) behave almost like xrange(x, y) except for two
    notable points:
    - when x < y it will be automatically descending,
    - revision filtered with this repoview will be skipped.

    """
    def __init__(self, repo, start=0, end=None):
        """
        start: first revision included the set
        (default to 0)
        end: first revision excluded (last+1)
        (default to len(repo)

        Spanset will be descending if `end` < `start`.
        """
        if end is None:
            end = len(repo)
        self._ascending = start <= end
        if not self._ascending:
            # normalize so that _start <= _end always holds; direction is
            # kept in _ascending instead
            start, end = end + 1, start +1
        self._start = start
        self._end = end
        self._hiddenrevs = repo.changelog.filteredrevs

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def _iterfilter(self, iterrange):
        # skip revisions hidden by the current repoview
        s = self._hiddenrevs
        for r in iterrange:
            if r not in s:
                yield r

    def __iter__(self):
        if self._ascending:
            return self.fastasc()
        else:
            return self.fastdesc()

    def fastasc(self):
        iterrange = xrange(self._start, self._end)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def fastdesc(self):
        iterrange = xrange(self._end - 1, self._start - 1, -1)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def __contains__(self, rev):
        hidden = self._hiddenrevs
        return ((self._start <= rev < self._end)
                and not (hidden and rev in hidden))

    def __nonzero__(self):
        for r in self:
            return True
        return False

    def __len__(self):
        if not self._hiddenrevs:
            return abs(self._end - self._start)
        else:
            count = 0
            start = self._start
            end = self._end
            for rev in self._hiddenrevs:
                # NOTE(review): __init__ normalizes start <= end, so the
                # first clause below looks unreachable -- confirm before
                # simplifying
                if (end < rev <= start) or (start <= rev < end):
                    count += 1
            return abs(self._end - self._start) - count

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        for x in it():
            return x
        return None

    def last(self):
        # iterate from the opposite end so the first value found is 'last'
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        for x in it():
            return x
        return None

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s %d:%d>' % (type(self).__name__, d,
                                 self._start, self._end - 1)
3633 3644
class fullreposet(spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization and magic to handle virtual
    revisions such as "null".
    """

    def __init__(self, repo):
        # a spanset over the repo's entire revision range
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, all of the other set should also be
        in self. Therefore `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        # other not a smartset, make is so
        if not util.safehasattr(other, 'isascending'):
            # filter out hidden revision
            # (this boldly assumes all smartset are pure)
            #
            # `other` was used with "&", let's assume this is a set like
            # object.
            other = baseset(other - self._hiddenrevs)

        # XXX As fullreposet is also used as bootstrap, this is wrong.
        #
        # With a giveme312() revset returning [3,1,2], this makes
        #   'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
        # We cannot just drop it because other usage still need to sort it:
        #   'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
        #
        # There is also some faulty revset implementations that rely on it
        # (eg: children as of its state in e8075329c5fb)
        #
        # When we fix the two points above we can move this into the if clause
        other.sort(reverse=self.isdescending())
        return other
3672 3683
def prettyformatset(revs):
    """Return repr(revs) reformatted with one '&lt;...&gt;' wrapper per line,
    indented by nesting depth."""
    rs = repr(revs)
    lines = []
    pos = 0
    total = len(rs)
    while pos < total:
        nextopen = rs.find('<', pos + 1)
        if nextopen < 0:
            nextopen = total
        # indentation level = number of '<' wrappers still open at 'pos'
        level = rs.count('<', 0, pos) - rs.count('>', 0, pos)
        assert level >= 0
        lines.append((level, rs[pos:nextopen].rstrip()))
        pos = nextopen
    return '\n'.join(' ' * level + segment for level, segment in lines)
3686 3697
# tell hggettext to extract docstrings from these functions:
# (the predicate docstrings double as user-facing help text)
i18nfunctions = symbols.values()
@@ -1,125 +1,122 b''
1 1 #require test-repo
2 2
3 3 $ import_checker="$TESTDIR"/../contrib/import-checker.py
4 4
5 5 Run the doctests from the import checker, and make sure
6 6 it's working correctly.
7 7 $ TERM=dumb
8 8 $ export TERM
9 9 $ python -m doctest $import_checker
10 10
11 11 Run additional tests for the import checker
12 12
13 13 $ mkdir testpackage
14 14
15 15 $ cat > testpackage/multiple.py << EOF
16 16 > from __future__ import absolute_import
17 17 > import os, sys
18 18 > EOF
19 19
20 20 $ cat > testpackage/unsorted.py << EOF
21 21 > from __future__ import absolute_import
22 22 > import sys
23 23 > import os
24 24 > EOF
25 25
26 26 $ cat > testpackage/stdafterlocal.py << EOF
27 27 > from __future__ import absolute_import
28 28 > from . import unsorted
29 29 > import os
30 30 > EOF
31 31
32 32 $ cat > testpackage/requirerelative.py << EOF
33 33 > from __future__ import absolute_import
34 34 > import testpackage.unsorted
35 35 > EOF
36 36
37 37 $ cat > testpackage/importalias.py << EOF
38 38 > from __future__ import absolute_import
39 39 > import ui
40 40 > EOF
41 41
42 42 $ cat > testpackage/relativestdlib.py << EOF
43 43 > from __future__ import absolute_import
44 44 > from .. import os
45 45 > EOF
46 46
47 47 $ cat > testpackage/symbolimport.py << EOF
48 48 > from __future__ import absolute_import
49 49 > from .unsorted import foo
50 50 > EOF
51 51
52 52 $ cat > testpackage/latesymbolimport.py << EOF
53 53 > from __future__ import absolute_import
54 54 > from . import unsorted
55 55 > from mercurial.node import hex
56 56 > EOF
57 57
58 58 $ cat > testpackage/multiplegroups.py << EOF
59 59 > from __future__ import absolute_import
60 60 > from . import unsorted
61 61 > from . import more
62 62 > EOF
63 63
64 64 $ mkdir testpackage/subpackage
65 65 $ cat > testpackage/subpackage/levelpriority.py << EOF
66 66 > from __future__ import absolute_import
67 67 > from . import foo
68 68 > from .. import parent
69 69 > EOF
70 70
71 71 $ cat > testpackage/sortedentries.py << EOF
72 72 > from __future__ import absolute_import
73 73 > from . import (
74 74 > foo,
75 75 > bar,
76 76 > )
77 77 > EOF
78 78
79 79 $ cat > testpackage/importfromalias.py << EOF
80 80 > from __future__ import absolute_import
81 81 > from . import ui
82 82 > EOF
83 83
84 84 $ cat > testpackage/importfromrelative.py << EOF
85 85 > from __future__ import absolute_import
86 86 > from testpackage.unsorted import foo
87 87 > EOF
88 88
89 89 $ python "$import_checker" testpackage/*.py testpackage/subpackage/*.py
90 90 testpackage/importalias.py ui module must be "as" aliased to uimod
91 91 testpackage/importfromalias.py ui from testpackage must be "as" aliased to uimod
92 92 testpackage/importfromrelative.py import should be relative: testpackage.unsorted
93 93 testpackage/importfromrelative.py direct symbol import from testpackage.unsorted
94 94 testpackage/latesymbolimport.py symbol import follows non-symbol import: mercurial.node
95 95 testpackage/multiple.py multiple imported names: os, sys
96 96 testpackage/multiplegroups.py multiple "from . import" statements
97 97 testpackage/relativestdlib.py relative import of stdlib module
98 98 testpackage/requirerelative.py import should be relative: testpackage.unsorted
99 99 testpackage/sortedentries.py imports from testpackage not lexically sorted: bar < foo
100 100 testpackage/stdafterlocal.py stdlib import follows local import: os
101 101 testpackage/subpackage/levelpriority.py higher-level import should come first: testpackage
102 102 testpackage/symbolimport.py direct symbol import from testpackage.unsorted
103 103 testpackage/unsorted.py imports not lexically sorted: os < sys
104 104 [1]
105 105
106 106 $ cd "$TESTDIR"/..
107 107
108 108 There are a handful of cases here that require renaming a module so it
109 109 doesn't overlap with a stdlib module name. There are also some cycles
110 110 here that we should still endeavor to fix, and some cycles will be
111 111 hidden by deduplication algorithm in the cycle detector, so fixing
112 112 these may expose other cycles.
113 113
114 114 $ hg locate 'mercurial/**.py' 'hgext/**.py' | sed 's-\\-/-g' | python "$import_checker" -
115 mercurial/revset.py mixed imports
116 stdlib: parser
117 relative: error, hbisect, phases, util
118 115 mercurial/templater.py mixed imports
119 116 stdlib: parser
120 117 relative: config, error, templatefilters, templatekw, util
121 118 mercurial/ui.py mixed imports
122 119 stdlib: formatter
123 120 relative: config, error, progress, scmutil, util
124 121 Import cycle: hgext.largefiles.basestore -> hgext.largefiles.localstore -> hgext.largefiles.basestore
125 122 [1]
General Comments 0
You need to be logged in to leave comments. Login now