##// END OF EJS Templates
devel-warn: issue a warning for old style revsets...
Pierre-Yves David -
r25630:c88082ba default
parent child Browse files
Show More
@@ -1,3618 +1,3625 b''
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re
9 9 import parser, util, error, hbisect, phases
10 10 import node
11 11 import heapq
12 12 import match as matchmod
13 13 from i18n import _
14 14 import encoding
15 15 import obsolete as obsmod
16 16 import pathutil
17 17 import repoview
18 18
def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst.

    Returns a descending generatorset of the ancestors of ``revs``
    (inclusive). When ``followfirst`` is true, only first parents are
    followed.
    """
    # followfirst restricts the walk to first parents only, by slicing
    # parentrevs(current)[:cut] below ([:1] vs [:None]).
    if followfirst:
        cut = 1
    else:
        cut = None
    cl = repo.changelog

    def iterate():
        # Walk ancestors lazily in descending revision order. heapq is a
        # min-heap, so revisions are stored negated to get max-heap
        # behavior.
        revs.sort(reverse=True)
        irevs = iter(revs)
        h = []

        inputrev = next(irevs, None)
        if inputrev is not None:
            heapq.heappush(h, -inputrev)

        seen = set()
        while h:
            current = -heapq.heappop(h)
            # Feed the next input rev into the heap once the walk reaches
            # the current one, so every member of 'revs' seeds the heap
            # exactly once and in order.
            if current == inputrev:
                inputrev = next(irevs, None)
                if inputrev is not None:
                    heapq.heappush(h, -inputrev)
            if current not in seen:
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)
51 51
def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst.

    Returns an ascending generatorset of descendants of ``revs``
    (exclusive of ``revs`` themselves). When ``followfirst`` is true,
    only first-parent links are considered.
    """
    # followfirst limits the parent test to the first parent only via
    # parentrevs(i)[:cut] below.
    if followfirst:
        cut = 1
    else:
        cut = None

    def iterate():
        cl = repo.changelog
        # XXX this should be 'parentset.min()' assuming 'parentset' is a
        # smartset (and if it is not, it should.)
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            # Single ascending sweep: a revision is a descendant when one
            # of its (considered) parents is already in 'seen'.
            seen = set(revs)
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)
80 80
def _revsbetween(repo, roots, heads):
    """Return all paths between roots and heads, inclusive of both endpoint
    sets."""
    if not roots:
        return baseset()
    parentrevs = repo.changelog.parentrevs
    visit = list(heads)
    reachable = set()
    seen = {}   # rev -> its parentrevs tuple, doubles as the visited set
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    minroot = min(roots)
    roots = set(roots)
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    # Phase 1: walk from heads toward roots, recording parents; never
    # descend below minroot since nothing there can reach a root.
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    # Phase 2: sweep visited revs in ascending order, propagating
    # reachability upward from the roots through recorded parent links.
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return baseset(sorted(reachable))
116 116
# Grammar table for the revset language. NOTE(review): consumed by the
# generic parser (parser.py); each entry appears to be
# (binding strength, prefix action, infix action[, suffix action]) --
# confirm against parser.parser before relying on the exact layout.
elements = {
    "(": (21, ("group", 1, ")"), ("func", 1, ")")),
    "##": (20, None, ("_concat", 20)),
    "~": (18, None, ("ancestor", 18)),
    "^": (18, None, ("parent", 18), ("parentpost", 18)),
    "-": (5, ("negate", 19), ("minus", 5)),
    "::": (17, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    "..": (17, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
    "not": (10, ("not", 10)),
    "!": (10, ("not", 10)),
    "and": (5, None, ("and", 5)),
    "&": (5, None, ("and", 5)),
    "%": (5, None, ("only", 5), ("onlypost", 5)),
    "or": (4, None, ("or", 4)),
    "|": (4, None, ("or", 4)),
    "+": (4, None, ("or", 4)),
    ",": (2, None, ("list", 2)),
    ")": (0, None, None),
    "symbol": (0, ("symbol",), None),
    "string": (0, ("string",), None),
    "end": (0, None, None),
}

# words that are operators, never symbols, regardless of context
keywords = set(['and', 'or', 'not'])

# default set of valid characters for the initial letter of symbols
_syminitletters = set(c for c in [chr(i) for i in xrange(256)]
                      if c.isalnum() or c in '._@' or ord(c) > 127)

# default set of valid characters for non-initial letters of symbols
_symletters = set(c for c in [chr(i) for i in xrange(256)]
                  if c.isalnum() or c in '-._/@' or ord(c) > 127)
152 152
def tokenize(program, lookup=None, syminitletters=None, symletters=None):
    '''
    Parse a revset statement into a stream of tokens

    Each token is a ``(type, value, position)`` triple; the stream is
    always terminated by an ``('end', None, pos)`` token.

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
        symletters = _symletters

    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            # r-prefixed strings are raw: no escape processing
            if c == 'r':
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = lambda x: x.decode('string-escape')
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c in syminitletters:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if d not in symletters:
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression: re-emit the dash-separated
                    # parts as individual symbol and '-' tokens
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                        s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            pos -= 1
        else:
            raise error.ParseError(_("syntax error in revset '%s'") %
                                   program, pos)
        pos += 1
    yield ('end', None, pos)
255 255
def parseerrordetail(inst):
    """Compose error message from specified ParseError object

    A two-argument error carries a position as its second argument and
    is rendered as 'at <pos>: <msg>'; otherwise the bare message is
    returned.
    """
    args = inst.args
    if len(args) <= 1:
        return args[0]
    return _('at %s: %s') % (args[1], args[0])
263 263
264 264 # helpers
265 265
def getstring(x, err):
    """Extract the text payload of a 'string' or 'symbol' node.

    Raises ParseError(err) when ``x`` is empty/None or any other node
    type.
    """
    if not x or x[0] not in ('string', 'symbol'):
        raise error.ParseError(err)
    return x[1]
270 270
def getlist(x):
    """Flatten a left-nested parse-tree 'list' node into a Python list.

    ``None``/empty input yields ``[]``; a non-list node is returned as a
    single-element list.
    """
    if not x:
        return []
    if x[0] != 'list':
        return [x]
    return getlist(x[1]) + [x[2]]
277 277
def getargs(x, min, max, err):
    """Return the flattened argument list of ``x`` after arity checking.

    At least ``min`` arguments are required; at most ``max`` are allowed
    unless ``max`` is negative (unlimited). Raises ParseError(err) on a
    violation.
    """
    args = getlist(x)
    count = len(args)
    if count < min or (0 <= max < count):
        raise error.ParseError(err)
    return args
283 283
def isvalidsymbol(tree):
    """Examine whether specified ``tree`` is valid ``symbol`` or not"""
    # check the node tag first (matches the original evaluation order)
    if tree[0] != 'symbol':
        return False
    return len(tree) > 1
288 288
def getsymbol(tree):
    """Get symbol name from valid ``symbol`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidsymbol``.
    """
    # ('symbol', name) -> name; no validation here by design
    return tree[1]
295 295
def isvalidfunc(tree):
    """Examine whether specified ``tree`` is valid ``func`` or not"""
    # a func node must be tagged 'func' and carry a valid symbol as its
    # function-name child
    if tree[0] != 'func' or len(tree) <= 1:
        return False
    return isvalidsymbol(tree[1])
300 300
def getfuncname(tree):
    """Get function name from valid ``func`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidfunc``.
    """
    # the first child of a func node is its ('symbol', name) node
    return getsymbol(tree[1])
307 307
def getfuncargs(tree):
    """Get list of function arguments from valid ``func`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidfunc``.
    A zero-argument call has no third child and yields an empty list.
    """
    if len(tree) <= 2:
        return []
    return getlist(tree[2])
317 317
def getset(repo, subset, x):
    # Evaluate the parsed revset tree ``x`` within ``subset``; always
    # returns a smartset. Old-style predicates that return a plain list
    # are wrapped in a baseset, with a devel warning when enabled.
    if not x:
        raise error.ParseError(_("missing argument"))
    s = methods[x[0]](repo, subset, *x[1:])
    if util.safehasattr(s, 'isascending'):
        # already a smartset: pass through unchanged
        return s
    if (repo.ui.configbool('devel', 'all-warnings')
        or repo.ui.configbool('devel', 'old-revset')):
        # else case should not happen, because all non-func are internal,
        # ignoring for now.
        if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
            repo.ui.develwarn('revset "%s" use list instead of smartset, '
                              '(upgrade your code)' % x[1][1])
    return baseset(s)
325 332
def _getrevsource(repo, r):
    """Return the revision that ``r`` was grafted, transplanted or
    rebased from, or None when no source is recorded or it cannot be
    resolved in this repo."""
    extra = repo[r].extra()
    for key in ('source', 'transplant_source', 'rebase_source'):
        if key not in extra:
            continue
        try:
            return repo[extra[key]].rev()
        except error.RepoLookupError:
            # recorded source is unknown here; try the next label
            pass
    return None
335 342
336 343 # operator methods
337 344
def stringset(repo, subset, x):
    # Resolve a bare revision identifier to a one-element baseset,
    # restricted to ``subset``. nullrev is only admitted when the subset
    # is the full repo (fullreposet), which explicitly contains it.
    x = repo[x].rev()
    if (x in subset
        or x == node.nullrev and isinstance(subset, fullreposet)):
        return baseset([x])
    return baseset()
344 351
def rangeset(repo, subset, x, y):
    # 'x:y' -- all revisions from the first member of x to the last
    # member of y, in either direction, intersected with subset.
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    m, n = m.first(), n.last()

    if m < n:
        r = spanset(repo, m, n + 1)
    else:
        # descending range (spanset end bound is exclusive)
        r = spanset(repo, m, n - 1)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    #
    # This has performance implication, carrying the sorting over when possible
    # would be more efficient.
    return r & subset
363 370
def dagrange(repo, subset, x, y):
    # 'x::y' -- every revision lying on a DAG path from the x set to the
    # y set (both endpoints included), intersected with subset.
    everything = fullreposet(repo)
    roots = getset(repo, everything, x)
    heads = getset(repo, everything, y)
    xs = _revsbetween(repo, roots, heads)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return xs & subset
370 377
def andset(repo, subset, x, y):
    # 'x and y' -- evaluate y within the result of evaluating x
    return getset(repo, getset(repo, subset, x), y)
373 380
def orset(repo, subset, *xs):
    # 'x or y or ...' -- union of all operand sets, each evaluated
    # against the original subset
    rs = [getset(repo, subset, x) for x in xs]
    return _combinesets(rs)
377 384
def notset(repo, subset, x):
    # 'not x' -- subset minus the revisions matched by x
    return subset - getset(repo, subset, x)
380 387
def listset(repo, subset, a, b):
    # a bare 'a, b' list is only legal inside a function call
    raise error.ParseError(_("can't use a list in this context"))
383 390
def func(repo, subset, a, b):
    # dispatch 'name(args)' to the registered predicate; a is the
    # ('symbol', name) node, b the (possibly None) argument tree
    if a[0] == 'symbol' and a[1] in symbols:
        return symbols[a[1]](repo, subset, b)
    raise error.UnknownIdentifier(a[1], symbols.keys())
388 395
389 396 # functions
390 397
def adds(repo, subset, x):
    """``adds(pattern)``
    Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
    # field 1 of the repo.status() tuple holds the added files
    return checkstatus(repo, subset, pat, 1)
402 409
def ancestor(repo, subset, x):
    """``ancestor(*changeset)``
    A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # (getset(repo, rl, i) for i in l) generates a list of lists
    # fold pairwise: gca of all revisions across every argument set
    for revs in (getset(repo, rl, i) for i in l):
        for r in revs:
            if anc is None:
                anc = repo[r]
            else:
                anc = anc.ancestor(repo[r])

    # only report the ancestor if it survives the subset restriction
    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()
427 434
def _ancestors(repo, subset, x, followfirst=False):
    # shared implementation for ancestors() and _firstancestors()
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    s = _revancestors(repo, heads, followfirst)
    return subset & s
434 441
def ancestors(repo, subset, x):
    """``ancestors(set)``
    Changesets that are ancestors of a changeset in set.
    """
    return _ancestors(repo, subset, x)
440 447
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    # (no docstring on purpose: hidden from user-facing help)
    return _ancestors(repo, subset, x, followfirst=True)
445 452
def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        # n is the parsed token for the number after '~'
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        # hop n first-parent links up from r
        for i in range(n):
            r = cl.parentrevs(r)[0]
        ps.add(r)
    return subset & ps
462 469
def author(repo, subset, x):
    """``author(string)``
    Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    # comparison is case-insensitive: both pattern and user are lowered
    n = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(n)
    return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
471 478
def bisect(repo, subset, x):
    """``bisect(string)``
    Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads``      : csets topologically good/bad
    - ``range``              : csets taking part in the bisection
    - ``pruned``             : csets that are goods, bads or skipped
    - ``untested``           : csets whose fate is yet unknown
    - ``ignored``            : csets ignored due to DAG topology
    - ``current``            : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    # hbisect.get resolves the status keyword to the matching revisions
    state = set(hbisect.get(repo, status))
    return subset & state
488 495
# Backward-compatibility
# - no help entry so that we do not advertise it any more
def bisected(repo, subset, x):
    # deprecated alias of bisect()
    return bisect(repo, subset, x)
493 500
def bookmark(repo, subset, x):
    """``bookmark([name])``
    The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = _stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            # exact name lookup; a missing bookmark is an error
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % bm)
            bms.add(repo[bmrev].rev())
        else:
            # pattern lookup; at least one bookmark must match
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        # no argument: all bookmarked revisions
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    bms -= set([node.nullrev])
    return subset & bms
531 538
def branch(repo, subset, x):
    """``branch(string or set)``
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = _stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]))
        else:
            # regex/glob pattern: filter on branch name directly
            return subset.filter(lambda r: matcher(getbi(r)[0]))

    # argument is a revset: select the given revs plus every rev on any
    # branch one of them belongs to
    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b)
564 571
def bumped(repo, subset, x):
    """``bumped()``
    Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    bumped = obsmod.getrevs(repo, 'bumped')
    return subset & bumped
575 582
def bundle(repo, subset, x):
    """``bundle()``
    Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        # only bundlerepo objects expose changelog.bundlerevs
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        raise util.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs
587 594
def checkstatus(repo, subset, pat, field):
    # Filter subset to changesets whose status tuple entry ``field``
    # (as returned by repo.status) contains a file matching ``pat``.
    hasset = matchmod.patkind(pat) == 'set'

    # cache the matcher across revisions; fileset patterns ('set:') are
    # context-dependent and must be rebuilt for every changeset
    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            # fast path: literal single-file pattern allows direct
            # membership tests instead of matching every file
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches)
619 626
def _children(repo, narrow, parentset):
    # Return the members of ``narrow`` that have a parent in
    # ``parentset``.
    if not parentset:
        return baseset()
    cs = set()
    pr = repo.changelog.parentrevs
    minrev = parentset.min()
    for r in narrow:
        # children always have a higher rev than their parents, so
        # anything at or below the smallest parent can be skipped
        if r <= minrev:
            continue
        for p in pr(r):
            if p in parentset:
                cs.add(r)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    return baseset(cs)
635 642
def children(repo, subset, x):
    """``children(set)``
    Child changesets of changesets in set.
    """
    s = getset(repo, fullreposet(repo), x)
    cs = _children(repo, subset, s)
    return subset & cs
643 650
def closed(repo, subset, x):
    """``closed()``
    Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    return subset.filter(lambda r: repo[r].closesbranch())
651 658
def contains(repo, subset, x):
    """``contains(pattern)``
    The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            # plain path: direct manifest membership test (fast path)
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            # real pattern: scan every file in the manifest
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches)
678 685
def converted(repo, subset, x):
    """``converted([id])``
    Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        # convert writes the original identifier into extra metadata;
        # startswith allows abbreviated identifiers
        source = repo[r].extra().get('convert_revision', None)
        return source is not None and (rev is None or source.startswith(rev))

    return subset.filter(lambda r: _matchvalue(r))
700 707
def date(repo, subset, x):
    """``date(interval)``
    Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    dm = util.matchdate(ds)
    # date()[0] is the unix timestamp of the changeset
    return subset.filter(lambda x: dm(repo[x].date()[0]))
709 716
def desc(repo, subset, x):
    """``desc(string)``
    Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    # lower both sides for the case-insensitive substring search
    ds = encoding.lower(getstring(x, _("desc requires a string")))

    def matches(x):
        c = repo[x]
        return ds in encoding.lower(c.description())

    return subset.filter(matches)
722 729
def _descendants(repo, subset, x, followfirst=False):
    # shared implementation for descendants() and _firstdescendants();
    # unlike _revdescendants, the roots themselves are included
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        # unordered subset: fall back to an intersection to preserve its
        # iteration order
        result = subset & result
    return result
741 748
def descendants(repo, subset, x):
    """``descendants(set)``
    Changesets which are descendants of changesets in set.
    """
    return _descendants(repo, subset, x)
747 754
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    # (no docstring on purpose: hidden from user-facing help)
    return _descendants(repo, subset, x, followfirst=True)
752 759
def destination(repo, subset, x):
    """``destination([set])``
    Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source.  Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be.  Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            # accumulate the chain of copies seen so far; if any ancestor
            # source matches, the whole chain is a destination
            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set.  Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset.  Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__)
796 803
def divergent(repo, subset, x):
    """``divergent()``
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    divergent = obsmod.getrevs(repo, 'divergent')
    return subset & divergent
805 812
def extinct(repo, subset, x):
    """``extinct()``
    Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    extincts = obsmod.getrevs(repo, 'extinct')
    return subset & extincts
814 821
def extra(repo, subset, x):
    """``extra(label, [value])``
    Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """

    # i18n: "extra" is a keyword
    l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
    # i18n: "extra" is a keyword
    label = getstring(l[0], _('first argument to extra must be a string'))
    value = None

    if len(l) > 1:
        # i18n: "extra" is a keyword
        value = getstring(l[1], _('second argument to extra must be a string'))
        kind, value, matcher = _stringmatcher(value)

    def _matchvalue(r):
        # with no value argument, mere presence of the label matches
        extra = repo[r].extra()
        return label in extra and (value is None or matcher(extra[label]))

    return subset.filter(lambda r: _matchvalue(r))
841 848
842 849 def filelog(repo, subset, x):
843 850 """``filelog(pattern)``
844 851 Changesets connected to the specified filelog.
845 852
846 853 For performance reasons, visits only revisions mentioned in the file-level
847 854 filelog, rather than filtering through all changesets (much faster, but
848 855 doesn't include deletes or duplicate changes). For a slower, more accurate
849 856 result, use ``file()``.
850 857
851 858 The pattern without explicit kind like ``glob:`` is expected to be
852 859 relative to the current directory and match against a file exactly
853 860 for efficiency.
854 861
855 862 If some linkrev points to revisions filtered by the current repoview, we'll
856 863 work around it to return a non-filtered value.
857 864 """
858 865
859 866 # i18n: "filelog" is a keyword
860 867 pat = getstring(x, _("filelog requires a pattern"))
861 868 s = set()
862 869 cl = repo.changelog
863 870
864 871 if not matchmod.patkind(pat):
865 872 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
866 873 files = [f]
867 874 else:
868 875 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
869 876 files = (f for f in repo[None] if m(f))
870 877
871 878 for f in files:
872 879 backrevref = {} # final value for: filerev -> changerev
873 880 lowestchild = {} # lowest known filerev child of a filerev
874 881 delayed = [] # filerev with filtered linkrev, for post-processing
875 882 lowesthead = None # cache for manifest content of all head revisions
876 883 fl = repo.file(f)
877 884 for fr in list(fl):
878 885 rev = fl.linkrev(fr)
879 886 if rev not in cl:
880 887 # changerev pointed in linkrev is filtered
881 888 # record it for post processing.
882 889 delayed.append((fr, rev))
883 890 continue
884 891 for p in fl.parentrevs(fr):
885 892 if 0 <= p and p not in lowestchild:
886 893 lowestchild[p] = fr
887 894 backrevref[fr] = rev
888 895 s.add(rev)
889 896
890 897 # Post-processing of all filerevs we skipped because they were
891 898 # filtered. If such filerevs have known and unfiltered children, this
892 899 # means they have an unfiltered appearance out there. We'll use linkrev
893 900 # adjustment to find one of these appearances. The lowest known child
894 901 # will be used as a starting point because it is the best upper-bound we
895 902 # have.
896 903 #
897 904 # This approach will fail when an unfiltered but linkrev-shadowed
898 905 # appearance exists in a head changeset without unfiltered filerev
899 906 # children anywhere.
900 907 while delayed:
901 908 # must be a descending iteration. To slowly fill lowest child
902 909 # information that is of potential use by the next item.
903 910 fr, rev = delayed.pop()
904 911 lkr = rev
905 912
906 913 child = lowestchild.get(fr)
907 914
908 915 if child is None:
909 916 # search for existence of this file revision in a head revision.
910 917 # There are three possibilities:
911 918 # - the revision exists in a head and we can find an
912 919 # introduction from there,
913 920 # - the revision does not exist in a head because it has been
914 921 # changed since its introduction: we would have found a child
915 922 # and be in the other 'else' clause,
916 923 # - all versions of the revision are hidden.
917 924 if lowesthead is None:
918 925 lowesthead = {}
919 926 for h in repo.heads():
920 927 fnode = repo[h].manifest().get(f)
921 928 if fnode is not None:
922 929 lowesthead[fl.rev(fnode)] = h
923 930 headrev = lowesthead.get(fr)
924 931 if headrev is None:
925 932 # content is nowhere unfiltered
926 933 continue
927 934 rev = repo[headrev][f].introrev()
928 935 else:
929 936 # the lowest known child is a good upper bound
930 937 childcrev = backrevref[child]
931 938 # XXX this does not guarantee returning the lowest
932 939 # introduction of this revision, but this gives a
933 940 # result which is a good start and will fit in most
934 941 # cases. We probably need to fix the multiple
935 942 # introductions case properly (report each
936 943 # introduction, even for identical file revisions)
937 944 # once and for all at some point anyway.
938 945 for p in repo[childcrev][f].parents():
939 946 if p.filerev() == fr:
940 947 rev = p.rev()
941 948 break
942 949 if rev == lkr: # no shadowed entry found
943 950 # XXX This should never happen unless some manifest points
944 951 # to biggish file revisions (like a revision that uses a
945 952 # parent that never appears in the manifest ancestors)
946 953 continue
947 954
948 955 # Fill the data for the next iteration.
949 956 for p in fl.parentrevs(fr):
950 957 if 0 <= p and p not in lowestchild:
951 958 lowestchild[p] = fr
952 959 backrevref[fr] = rev
953 960 s.add(rev)
954 961
955 962 return subset & s
956 963
def first(repo, subset, x):
    """``first(set, [n])``
    An alias for limit().
    """
    # 'first' and 'limit' are the same predicate; limit() does the real work
    return limit(repo, subset, x)
962 969
def _follow(repo, subset, x, name, followfirst=False):
    # Shared implementation behind follow() and _followfirst(). 'name' is
    # only used to build the error messages for bad arguments. With a
    # filename argument, follow that file's history; without one, follow
    # the ancestry of the working directory's first parent.
    args = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
    pctx = repo['.']
    if not args:
        # no filename: plain changeset ancestry of '.'
        revs = _revancestors(repo, baseset([pctx.rev()]), followfirst)
        return subset & revs
    fname = getstring(args[0], _("%s expected a filename") % name)
    if fname not in pctx:
        return baseset()
    fctx = pctx[fname]
    revs = set(c.rev() for c in fctx.ancestors(followfirst=followfirst))
    # include the revision responsible for the most recent version
    revs.add(fctx.introrev())
    return subset & revs
979 986
def follow(repo, subset, x):
    """``follow([file])``
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If a filename is specified, the history of the given file is followed,
    including copies.
    """
    # all the work happens in _follow(), shared with _followfirst()
    return _follow(repo, subset, x, 'follow')
987 994
def _followfirst(repo, subset, x):
    # ``_followfirst([file])``
    # Like ``follow([file])`` but follows only the first parent of
    # every revision or file revision.
    # (internal predicate; a comment rather than a docstring, presumably
    # to keep it out of the generated help)
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
993 1000
def getall(repo, subset, x):
    """``all()``
    All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    # spanset(repo) covers every real revision; intersecting keeps subset's
    # members only
    return subset & spanset(repo)  # drop "null" if any
1001 1008
1002 1009 def grep(repo, subset, x):
1003 1010 """``grep(regex)``
1004 1011 Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1005 1012 to ensure special escape characters are handled correctly. Unlike
1006 1013 ``keyword(string)``, the match is case-sensitive.
1007 1014 """
1008 1015 try:
1009 1016 # i18n: "grep" is a keyword
1010 1017 gr = re.compile(getstring(x, _("grep requires a string")))
1011 1018 except re.error, e:
1012 1019 raise error.ParseError(_('invalid match pattern: %s') % e)
1013 1020
1014 1021 def matches(x):
1015 1022 c = repo[x]
1016 1023 for e in c.files() + [c.user(), c.description()]:
1017 1024 if gr.search(e):
1018 1025 return True
1019 1026 return False
1020 1027
1021 1028 return subset.filter(matches)
1022 1029
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    # i18n: "_matchfiles" is a keyword
    l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
    pats, inc, exc = [], [], []
    rev, default = None, None
    # sort each argument into its bucket by two-character prefix
    for arg in l:
        # i18n: "_matchfiles" is a keyword
        s = getstring(arg, _("_matchfiles requires string arguments"))
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'revision'))
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'default mode'))
            default = value
        else:
            # i18n: "_matchfiles" is a keyword
            raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
    if not default:
        default = 'glob'

    # one matcher combining all collected patterns; repo[rev] (repo[None]
    # is the working directory) is the context used to resolve them
    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    # a revision matches when any file it touches matches the matcher
    def matches(x):
        for f in repo[x].files():
            if m(f):
                return True
        return False

    return subset.filter(matches)
1079 1086
def hasfile(repo, subset, x):
    """``file(pattern)``
    Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    # delegate to _matchfiles() with a single plain ('p:') pattern
    return _matchfiles(repo, subset, ('string', 'p:' + pat))
1092 1099
def head(repo, subset, x):
    """``head()``
    Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    hs = set()
    cl = repo.changelog
    # collect the revision numbers of every branch's head nodes
    for b, ls in repo.branchmap().iteritems():
        hs.update(cl.rev(h) for h in ls)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    # XXX We should not be using '.filter' here, but combines subset with '&'
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return baseset(hs).filter(subset.__contains__)
1109 1116
def heads(repo, subset, x):
    """``heads(set)``
    Members of set with no children in set.
    """
    # a head of the set is any member that is not a parent of another member
    members = getset(repo, subset, x)
    parentrevs = parents(repo, subset, x)
    return members - parentrevs
1117 1124
def hidden(repo, subset, x):
    """``hidden()``
    Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    # the revisions filtered out of the 'visible' repo view are exactly
    # the hidden ones
    hiddenrevs = repoview.filterrevs(repo, 'visible')
    return subset & hiddenrevs
1126 1133
def keyword(repo, subset, x):
    """``keyword(string)``
    Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(rev):
        # case-insensitive substring search over files, user and description
        ctx = repo[rev]
        for text in ctx.files() + [ctx.user(), ctx.description()]:
            if kw in encoding.lower(text):
                return True
        return False

    return subset.filter(matches)
1141 1148
def limit(repo, subset, x):
    """``limit(set, [n])``
    First n members of set, defaulting to 1.
    """
    # i18n: "limit" is a keyword
    args = getargs(x, 1, 2, _("limit requires one or two arguments"))
    try:
        lim = 1
        if len(args) == 2:
            # i18n: "limit" is a keyword
            lim = int(getstring(args[1], _("limit requires a number")))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    # examine the first 'lim' elements of the argument set, in its own
    # order, and keep those that are also in 'subset'
    inputset = getset(repo, fullreposet(repo), args[0])
    it = iter(inputset)
    picked = []
    for _i in xrange(lim):
        rev = next(it, None)
        if rev is None:
            break
        if rev in subset:
            picked.append(rev)
    return baseset(picked)
1167 1174
def last(repo, subset, x):
    """``last(set, [n])``
    Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    args = getargs(x, 1, 2, _("last requires one or two arguments"))
    try:
        lim = 1
        if len(args) == 2:
            # i18n: "last" is a keyword
            lim = int(getstring(args[1], _("last requires a number")))
    except (TypeError, ValueError):
        # i18n: "last" is a keyword
        raise error.ParseError(_("last expects a number"))
    # walk the argument set backwards and examine its first 'lim'
    # elements, keeping those that are also in 'subset'
    inputset = getset(repo, fullreposet(repo), args[0])
    inputset.reverse()
    it = iter(inputset)
    picked = []
    for _i in xrange(lim):
        rev = next(it, None)
        if rev is None:
            break
        if rev in subset:
            picked.append(rev)
    return baseset(picked)
1194 1201
def maxrev(repo, subset, x):
    """``max(set)``
    Changeset with highest revision number in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    if candidates:
        top = candidates.max()
        # only return it if the caller's subset contains it
        if top in subset:
            return baseset([top])
    return baseset()
1205 1212
def merge(repo, subset, x):
    """``merge()``
    Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    cl = repo.changelog
    def ismerge(rev):
        # a merge has a real (non -1) second parent
        return cl.parentrevs(rev)[1] != -1
    return subset.filter(ismerge)
1214 1221
def branchpoint(repo, subset, x):
    """``branchpoint()``
    Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    # count children per revision from the lowest candidate upward;
    # slot i of parentscount tracks revision 'baserev + i'
    parentscount = [0]*(len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                parentscount[p - baserev] += 1
    # a branchpoint is any candidate with two or more recorded children
    return subset.filter(lambda r: parentscount[r - baserev] > 1)
1233 1240
def minrev(repo, subset, x):
    """``min(set)``
    Changeset with lowest revision number in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    if candidates:
        lowest = candidates.min()
        # only return it if the caller's subset contains it
        if lowest in subset:
            return baseset([lowest])
    return baseset()
1244 1251
def modifies(repo, subset, x):
    """``modifies(pattern)``
    Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    # index 0 presumably selects the 'modified' status field (removes()
    # passes 2) -- see checkstatus() elsewhere in this module
    return checkstatus(repo, subset, pat, 0)
1256 1263
def named(repo, subset, x):
    """``named(namespace)``
    The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    # _stringmatcher interprets the optional 're:'/'literal:' prefix
    kind, pattern, matcher = _stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        # exact namespace name: it must exist
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        # pattern form: collect every namespace whose name matches
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    # gather the revisions bound to any non-deprecated name in the
    # selected namespaces
    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    names -= set([node.nullrev])
    return subset & names
1294 1301
def node_(repo, subset, x):
    """``id(string)``
    Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    args = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    hexprefix = getstring(args[0], _("id requires a string"))
    rn = None
    if len(hexprefix) == 40:
        # full-length node: translate it to a revision number directly
        try:
            rn = repo.changelog.rev(node.bin(hexprefix))
        except (LookupError, TypeError):
            rn = None
    else:
        # shorter prefix: rely on unambiguous partial matching
        pm = repo.changelog._partialmatch(hexprefix)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    return baseset([rn]) & subset
1318 1325
def obsolete(repo, subset, x):
    """``obsolete()``
    Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    # the obsolete module provides the set of obsolete revisions
    obsoletes = obsmod.getrevs(repo, 'obsolete')
    return subset & obsoletes
1326 1333
def only(repo, subset, x):
    """``only(set, [set])``
    Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        # single-argument form: exclude every repo head that is neither a
        # descendant of 'include' nor part of it
        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if not rev in descendants and not rev in include]
    else:
        # two-argument form: the second set is the exclusion set
        exclude = getset(repo, fullreposet(repo), args[1])

    # ::include - ::exclude, computed by the changelog
    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & results
1352 1359
def origin(repo, subset, x):
    """``origin([set])``
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is None:
        dests = fullreposet(repo)
    else:
        dests = getset(repo, fullreposet(repo), x)

    def _firstsrc(rev):
        # walk the recorded source chain back to its starting changeset
        src = _getrevsource(repo, rev)
        if src is None:
            return None
        while True:
            prev = _getrevsource(repo, src)
            if prev is None:
                return src
            src = prev

    o = set(_firstsrc(r) for r in dests)
    # revisions without a recorded source contribute None; drop it
    o.discard(None)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & o
1383 1390
def outgoing(repo, subset, x):
    """``outgoing([path])``
    Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    import discovery
    import hg
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    # resolve the path, falling back to 'default-push' then 'default'
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # buffer the ui so discovery output is not shown to the user
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = set([cl.rev(r) for r in outgoing.missing])
    return subset & o
1408 1415
def p1(repo, subset, x):
    """``p1([set])``
    First parent of changesets in set, or the working directory.
    """
    if x is None:
        # no argument: first parent of the working directory
        rev = repo[x].p1().rev()
        if rev >= 0:
            return subset & baseset([rev])
        return baseset()

    cl = repo.changelog
    ps = set(cl.parentrevs(r)[0]
             for r in getset(repo, fullreposet(repo), x))
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
1427 1434
def p2(repo, subset, x):
    """``p2([set])``
    Second parent of changesets in set, or the working directory.
    """
    if x is None:
        # no argument: second parent of the working directory, if any
        wparents = repo[x].parents()
        try:
            rev = wparents[1].rev()
        except IndexError:
            # the working directory has a single parent
            return baseset()
        if rev >= 0:
            return subset & baseset([rev])
        return baseset()

    cl = repo.changelog
    ps = set(cl.parentrevs(r)[1]
             for r in getset(repo, fullreposet(repo), x))
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
1450 1457
def parents(repo, subset, x):
    """``parents([set])``
    The set of all parents for all changesets in set, or the working directory.
    """
    ps = set()
    if x is None:
        # no argument: parents of the working directory
        for p in repo[x].parents():
            ps.add(p.rev())
    else:
        cl = repo.changelog
        for r in getset(repo, fullreposet(repo), x):
            ps.update(cl.parentrevs(r))
    ps.discard(node.nullrev)
    return subset & ps
1464 1471
def _phase(repo, subset, target):
    """helper to select all rev in phase <target>"""
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    phasesets = repo._phasecache._phasesets
    if phasesets:
        # fast path: the phase sets are precomputed
        revs = baseset(phasesets[target] - repo.changelog.filteredrevs)
        revs.sort() # set are non ordered, so we enforce ascending
        return subset & revs
    # slow path: test each candidate revision's phase individually
    phase = repo._phasecache.phase
    return subset.filter(lambda r: phase(repo, r) == target, cache=False)
1477 1484
def draft(repo, subset, x):
    """``draft()``
    Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    # phase selection is shared with secret() via the _phase() helper
    target = phases.draft
    return _phase(repo, subset, target)
1485 1492
def secret(repo, subset, x):
    """``secret()``
    Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    # phase selection is shared with draft() via the _phase() helper
    target = phases.secret
    return _phase(repo, subset, target)
1493 1500
def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        which = int(n[1])
        if which not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if which == 0:
            # ^0 is the revision itself
            ps.add(r)
        elif which == 1:
            ps.add(cl.parentrevs(r)[0])
        else: # which == 2
            prevs = cl.parentrevs(r)
            if len(prevs) > 1:
                ps.add(prevs[1])
    return subset & ps
1518 1525
def present(repo, subset, x):
    """``present(set)``
    An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    # swallow lookup errors: unknown revisions yield an empty set instead
    # of aborting the whole query
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        return baseset()
1532 1539
# for internal use
def _notpublic(repo, subset, x):
    getargs(x, 0, 0, "_notpublic takes no arguments")
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        # fast path: union every non-public phase set
        revs = set()
        for phaseset in repo._phasecache._phasesets[1:]:
            revs.update(phaseset)
        revs = baseset(revs - repo.changelog.filteredrevs)
        revs.sort()
        return subset & revs
    # slow path: test each candidate revision's phase individually
    phase = repo._phasecache.phase
    target = phases.public
    return subset.filter(lambda r: phase(repo, r) != target, cache=False)
1549 1556
def public(repo, subset, x):
    """``public()``
    Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    phase = repo._phasecache.phase
    target = phases.public
    def ispublic(r):
        return phase(repo, r) == target
    return subset.filter(ispublic, cache=False)
1559 1566
def remote(repo, subset, x):
    """``remote([id [,path]])``
    Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        # '.' stands for the current local branch name
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # resolve the identifier on the remote, then map its node back to a
    # local revision if the local repo knows it
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
1594 1601
def removes(repo, subset, x):
    """``removes(pattern)``
    Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _("removes requires a pattern"))
    # index 2 presumably selects the 'removed' status field (modifies()
    # passes 0) -- see checkstatus() elsewhere in this module
    return checkstatus(repo, subset, pat, 2)
1606 1613
def rev(repo, subset, x):
    """``rev(number)``
    Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    args = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        revnum = int(getstring(args[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    # nullrev is allowed even though it is never part of the changelog
    if revnum != node.nullrev and revnum not in repo.changelog:
        return baseset()
    return subset & baseset([revnum])
1622 1629
def matching(repo, subset, x):
    """``matching(revision [, field])``
    Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                # 'author' is an alias for 'user'
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
                  'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True),)
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    # a candidate matches when, for at least one reference revision,
    # every selected field compares equal
    def matches(x):
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
            if match:
                return True
        return False

    return subset.filter(matches)
1734 1741
def reverse(repo, subset, x):
    """``reverse(set)``
    Reverse order of set.
    """
    # in-place reversal of the evaluated argument set
    revs = getset(repo, subset, x)
    revs.reverse()
    return revs
1742 1749
def roots(repo, subset, x):
    """``roots(set)``
    Changesets in set with no parent changeset in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    # restrict to revisions present in both the argument set and subset
    narrowed = subset & candidates
    # drop every revision that has a parent inside the candidate set
    withparent = _children(repo, narrowed, candidates)
    return narrowed - withparent
1751 1758
def sort(repo, subset, x):
    """``sort(set[, [-]key...])``
    Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    """
    # i18n: "sort" is a keyword
    l = getargs(x, 1, 2, _("sort requires one or two arguments"))
    keys = "rev"
    if len(l) == 2:
        # i18n: "sort" is a keyword
        keys = getstring(l[1], _("sort spec must be a string"))

    s = l[0]
    keys = keys.split()
    l = []
    def invert(s):
        # map each byte to its complement so that an ascending sort of
        # the inverted string yields descending order of the original
        return "".join(chr(255 - ord(c)) for c in s)
    revs = getset(repo, subset, s)
    if keys == ["rev"]:
        revs.sort()
        return revs
    elif keys == ["-rev"]:
        revs.sort(reverse=True)
        return revs
    for r in revs:
        c = repo[r]
        e = []
        for k in keys:
            if k == 'rev':
                e.append(r)
            elif k == '-rev':
                e.append(-r)
            elif k == 'branch':
                e.append(c.branch())
            elif k == '-branch':
                e.append(invert(c.branch()))
            elif k == 'desc':
                e.append(c.description())
            elif k == '-desc':
                e.append(invert(c.description()))
            # Previously these used substring tests (k in 'user author'),
            # which silently accepted bogus keys like 'ser', 'r a' or ''.
            # Use explicit tuple membership so unknown keys raise below.
            elif k in ('user', 'author'):
                e.append(c.user())
            elif k in ('-user', '-author'):
                e.append(invert(c.user()))
            elif k == 'date':
                e.append(c.date()[0])
            elif k == '-date':
                e.append(-c.date()[0])
            else:
                raise error.ParseError(_("unknown sort key %r") % k)
        # append the revision itself as a stable tie-breaker and so the
        # final comprehension can recover it with e[-1]
        e.append(r)
        l.append(e)
    l.sort()
    return baseset([e[-1] for e in l])
1814 1821
def subrepo(repo, subset, x):
    """``subrepo([pattern])``
    Changesets that add, modify or remove the given subrepo.  If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    # only changesets touching .hgsubstate can change a subrepo state
    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        # yield the names matching the user pattern; note: the local 'm'
        # here deliberately shadows the .hgsubstate matcher above
        k, p, m = _stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        # status restricted to .hgsubstate between the revision and its p1
        s = repo.status(c.p1().node(), c.node(), match=m)

        if len(args) == 0:
            # no pattern: any kind of .hgsubstate change qualifies
            return s.added or s.modified or s.removed

        if s.added:
            return any(submatches(c.substate.keys()))

        if s.modified:
            # consider subrepos present on either side of the change
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches)
1857 1864
def _stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = _stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    if pattern.startswith('re:'):
        pattern = pattern[3:]
        try:
            regex = re.compile(pattern)
        except re.error, e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        # regex.search matches anywhere in the string, not just the start
        return 're', pattern, regex.search
    elif pattern.startswith('literal:'):
        pattern = pattern[8:]
    # fall-through: no (or unknown) prefix means exact string equality
    return 'literal', pattern, pattern.__eq__
1896 1903
def _substringmatcher(pattern):
    """Like _stringmatcher(), but a 'literal' pattern is matched as a
    substring instead of by full string equality."""
    kind, pattern, matcher = _stringmatcher(pattern)
    if kind != 'literal':
        return kind, pattern, matcher
    def contains(s):
        return pattern in s
    return kind, pattern, contains
1902 1909
def tag(repo, subset, x):
    """``tag([name])``
    The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if args:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = _stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise error.RepoLookupError(_("tag '%s' does not exist")
                                            % pattern)
            s = set([repo[tn].rev()])
        else:
            # pattern matching: scan the full tag list
            s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
    else:
        # no argument: all tagged revisions except the implicit 'tip' tag
        s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
    return subset & s
1931 1938
def tagged(repo, subset, x):
    # alias of tag(); no docstring so only tag() shows up in the help text
    return tag(repo, subset, x)
1934 1941
def unstable(repo, subset, x):
    """``unstable()``
    Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    # revisions flagged 'unstable' by the obsolescence machinery
    unstables = obsmod.getrevs(repo, 'unstable')
    return subset & unstables
1943 1950
1944 1951
def user(repo, subset, x):
    """``user(string)``
    User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    # implemented as a plain alias of author()
    return author(repo, subset, x)
1954 1961
# experimental
def wdir(repo, subset, x):
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    # the working directory is represented by revision None; only report
    # it when the input subset can actually contain it
    if None in subset or isinstance(subset, fullreposet):
        return baseset([None])
    return baseset()
1962 1969
# for internal use
def _list(repo, subset, x):
    """Select revisions named in a '\\0'-separated list of specs.

    Order of the input list is preserved; only revisions also present in
    subset (or nullrev against a fullreposet) are returned.
    """
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    ls = []
    seen = set()
    for t in s.split('\0'):
        try:
            # fast path for integer revision
            r = int(t)
            # reject strings that merely look numeric ('05', '+1') and
            # integers outside the changelog; fall back to full lookup
            if str(r) != t or r not in cl:
                raise ValueError
        except ValueError:
            r = repo[t].rev()
        if r in seen:
            continue
        if (r in subset
            or r == node.nullrev and isinstance(subset, fullreposet)):
            ls.append(r)
        seen.add(r)
    return baseset(ls)
1988 1995
# for internal use
def _intlist(repo, subset, x):
    """Select the revisions given as a NUL-separated list of integers,
    keeping only those also present in subset."""
    data = getstring(x, "internal error")
    if not data:
        return baseset()
    candidates = (int(r) for r in data.split('\0'))
    return baseset([r for r in candidates if r in subset])
1997 2004
# for internal use
def _hexlist(repo, subset, x):
    """Select the revisions given as a NUL-separated list of hex nodes,
    keeping only those also present in subset."""
    data = getstring(x, "internal error")
    if not data:
        return baseset()
    torev = repo.changelog.rev
    candidates = [torev(node.bin(h)) for h in data.split('\0')]
    return baseset([r for r in candidates if r in subset])
2007 2014
# mapping of revset predicate names to their implementation functions;
# parsed 'func' nodes are dispatched through this table (leading '_'
# entries are internal-only and not part of the user-visible language)
symbols = {
    "adds": adds,
    "all": getall,
    "ancestor": ancestor,
    "ancestors": ancestors,
    "_firstancestors": _firstancestors,
    "author": author,
    "bisect": bisect,
    "bisected": bisected,
    "bookmark": bookmark,
    "branch": branch,
    "branchpoint": branchpoint,
    "bumped": bumped,
    "bundle": bundle,
    "children": children,
    "closed": closed,
    "contains": contains,
    "converted": converted,
    "date": date,
    "desc": desc,
    "descendants": descendants,
    "_firstdescendants": _firstdescendants,
    "destination": destination,
    "divergent": divergent,
    "draft": draft,
    "extinct": extinct,
    "extra": extra,
    "file": hasfile,
    "filelog": filelog,
    "first": first,
    "follow": follow,
    "_followfirst": _followfirst,
    "grep": grep,
    "head": head,
    "heads": heads,
    "hidden": hidden,
    "id": node_,
    "keyword": keyword,
    "last": last,
    "limit": limit,
    "_matchfiles": _matchfiles,
    "max": maxrev,
    "merge": merge,
    "min": minrev,
    "modifies": modifies,
    "named": named,
    "obsolete": obsolete,
    "only": only,
    "origin": origin,
    "outgoing": outgoing,
    "p1": p1,
    "p2": p2,
    "parents": parents,
    "present": present,
    "public": public,
    "_notpublic": _notpublic,
    "remote": remote,
    "removes": removes,
    "rev": rev,
    "reverse": reverse,
    "roots": roots,
    "sort": sort,
    "secret": secret,
    "subrepo": subrepo,
    "matching": matching,
    "tag": tag,
    "tagged": tagged,
    "user": user,
    "unstable": unstable,
    "wdir": wdir,
    "_list": _list,
    "_intlist": _intlist,
    "_hexlist": _hexlist,
}
2082 2089
# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
# NOTE: keep this in sync with the 'symbols' table above when adding
# predicates; e.g. 'contains' and 'grep' are deliberately absent
safesymbols = set([
    "adds",
    "all",
    "ancestor",
    "ancestors",
    "_firstancestors",
    "author",
    "bisect",
    "bisected",
    "bookmark",
    "branch",
    "branchpoint",
    "bumped",
    "bundle",
    "children",
    "closed",
    "converted",
    "date",
    "desc",
    "descendants",
    "_firstdescendants",
    "destination",
    "divergent",
    "draft",
    "extinct",
    "extra",
    "file",
    "filelog",
    "first",
    "follow",
    "_followfirst",
    "head",
    "heads",
    "hidden",
    "id",
    "keyword",
    "last",
    "limit",
    "_matchfiles",
    "max",
    "merge",
    "min",
    "modifies",
    "obsolete",
    "only",
    "origin",
    "outgoing",
    "p1",
    "p2",
    "parents",
    "present",
    "public",
    "_notpublic",
    "remote",
    "removes",
    "rev",
    "reverse",
    "roots",
    "sort",
    "secret",
    "matching",
    "tag",
    "tagged",
    "user",
    "unstable",
    "wdir",
    "_list",
    "_intlist",
    "_hexlist",
    ])
2156 2163
# mapping of parse-tree node types to the functions that evaluate them;
# getset() dispatches through this table
methods = {
    "range": rangeset,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "or": orset,
    "not": notset,
    "list": listset,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": p1,
}
2171 2178
def optimize(x, small):
    """Optimize parsed tree ``x``; return a ``(weight, tree)`` pair.

    The weight is a rough cost estimate used to evaluate the cheaper
    operand of an 'and' first.  ``small`` hints that the caller expects a
    small result set, which makes single revisions score lower.

    NOTE: several branches below use substring membership on a space-
    separated string (e.g. ``op in 'string symbol negate'``), which also
    matches partial words; this is historical behavior.
    """
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    if op == 'minus':
        # 'a - b' is rewritten as 'a and not b'
        return optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'only':
        return optimize(('func', ('symbol', 'only'),
                         ('list', x[1], x[2])), small)
    elif op == 'onlypost':
        return optimize(('func', ('symbol', 'only'), x[1]), small)
    elif op == 'dagrangepre':
        return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        return optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangepre':
        return optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        return optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        return optimize(('string',
                         '-' + getstring(x[1], _("can't negate that"))), small)
    elif op in 'string symbol negate':
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = optimize(x[1], True)
        wb, tb = optimize(x[2], True)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
        def isonly(revs, bases):
            return (
                revs[0] == 'func'
                and getstring(revs[1], _('not a symbol')) == 'ancestors'
                and bases[0] == 'not'
                and bases[1][0] == 'func'
                and getstring(bases[1][1], _('not a symbol')) == 'ancestors')

        w = min(wa, wb)
        if isonly(ta, tb):
            return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
        if isonly(tb, ta):
            return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))

        # evaluate the cheaper operand first
        if wa > wb:
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        # fast path for machine-generated expression, that is likely to have
        # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
        ws, ts, ss = [], [], []
        def flushss():
            # fold the pending run of trivial operands into one _list()
            if not ss:
                return
            if len(ss) == 1:
                w, t = ss[0]
            else:
                s = '\0'.join(t[1] for w, t in ss)
                y = ('func', ('symbol', '_list'), ('string', s))
                w, t = optimize(y, False)
            ws.append(w)
            ts.append(t)
            del ss[:]
        for y in x[1:]:
            w, t = optimize(y, False)
            if t[0] == 'string' or t[0] == 'symbol':
                ss.append((w, t))
                continue
            flushss()
            ws.append(w)
            ts.append(t)
        flushss()
        if len(ts) == 1:
            return ws[0], ts[0] # 'or' operation is fully optimized out
        # we can't reorder trees by weight because it would change the order.
        # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
        # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
        return max(ws), (op,) + tuple(ts)
    elif op == 'not':
        # Optimize not public() to _notpublic() because we have a fast version
        if x[1] == ('func', ('symbol', 'public'), None):
            newsym = ('func', ('symbol', '_notpublic'), None)
            o = optimize(newsym, not small)
            return o[0], o[1]
        else:
            o = optimize(x[1], not small)
            return o[0], (op, o[1])
    elif op == 'parentpost':
        o = optimize(x[1], small)
        return o[0], (op, o[1])
    elif op == 'group':
        # parentheses are only grouping; drop the node
        return optimize(x[1], small)
    elif op in 'dagrange range list parent ancestorspec':
        if op == 'parent':
            # x^:y means (x^) : y, not x ^ (:y)
            post = ('parentpost', x[1])
            if x[2][0] == 'dagrangepre':
                return optimize(('dagrange', post, x[2][1]), small)
            elif x[2][0] == 'rangepre':
                return optimize(('range', post, x[2][1]), small)

        wa, ta = optimize(x[1], small)
        wb, tb = optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'func':
        f = getstring(x[1], _("not a symbol"))
        wa, ta = optimize(x[2], small)
        if f in ("author branch closed date desc file grep keyword "
                 "outgoing user"):
            w = 10 # slow
        elif f in "modifies adds removes":
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in "reverse limit first _intlist":
            w = 0
        elif f in "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        return w + wa, (op, x[1], ta)
    return 1, x
2300 2307
# parse-tree prefix that marks an alias-argument placeholder node
_aliasarg = ('func', ('symbol', '_aliasarg'))

def _getaliasarg(tree):
    """Return X when ``tree`` is exactly
    ('func', ('symbol', '_aliasarg'), ('string', X)); otherwise None.
    """
    if len(tree) != 3:
        return None
    if tree[:2] != _aliasarg or tree[2][0] != 'string':
        return None
    return tree[2][1]

def _checkaliasarg(tree, known=None):
    """Ensure ``tree`` contains no _aliasarg constructs, or only ones
    whose value appears in ``known``.  Guards against alias placeholder
    injection.
    """
    if not isinstance(tree, tuple):
        return
    name = _getaliasarg(tree)
    if name is not None and (not known or name not in known):
        raise error.UnknownIdentifier('_aliasarg', [])
    for subtree in tree:
        _checkaliasarg(subtree, known)
2321 2328
# the set of valid characters for the initial letter of symbols in
# alias declarations and definitions
# ('$' is accepted here for backward compatibility of alias arguments,
# see _tokenizealias)
_aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
                           if c.isalnum() or c in '._@$' or ord(c) > 127)
2326 2333
def _tokenizealias(program, lookup=None):
    """Parse alias declaration/definition into a stream of tokens

    This allows symbol names to use also ``$`` as an initial letter
    (for backward compatibility), and callers of this function should
    examine whether ``$`` is used also for unexpected symbols or not.
    """
    # widen the initial-letter set so '$1'-style names tokenize as symbols
    return tokenize(program, lookup=lookup,
                    syminitletters=_aliassyminitletters)
2336 2343
def _parsealiasdecl(decl):
    """Parse alias declaration ``decl``

    This returns ``(name, tree, args, errorstr)`` tuple:

    - ``name``: of declared alias (may be ``decl`` itself at error)
    - ``tree``: parse result (or ``None`` at error)
    - ``args``: list of alias argument names (or None for symbol declaration)
    - ``errorstr``: detail about detected error (or None)

    >>> _parsealiasdecl('foo')
    ('foo', ('symbol', 'foo'), None, None)
    >>> _parsealiasdecl('$foo')
    ('$foo', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo::bar')
    ('foo::bar', None, None, 'invalid format')
    >>> _parsealiasdecl('foo bar')
    ('foo bar', None, None, 'at 4: invalid token')
    >>> _parsealiasdecl('foo()')
    ('foo', ('func', ('symbol', 'foo')), [], None)
    >>> _parsealiasdecl('$foo()')
    ('$foo()', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo($1, $2)')
    ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
    >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
    ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
    >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
    ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo(bar($1, $2))')
    ('foo(bar($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo("string")')
    ('foo("string")', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo($1, $2')
    ('foo($1, $2', None, None, 'at 10: unexpected token: end')
    >>> _parsealiasdecl('foo("string')
    ('foo("string', None, None, 'at 5: unterminated string')
    >>> _parsealiasdecl('foo($1, $2, $1)')
    ('foo', None, None, 'argument names collide with each other')
    """
    # use the alias tokenizer, which allows '$'-initial symbols
    p = parser.parser(_tokenizealias, elements)
    try:
        tree, pos = p.parse(decl)
        if (pos != len(decl)):
            raise error.ParseError(_('invalid token'), pos)

        if isvalidsymbol(tree):
            # "name = ...." style
            name = getsymbol(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            return (name, ('symbol', name), None, None)

        if isvalidfunc(tree):
            # "name(arg, ....) = ...." style
            name = getfuncname(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            args = []
            for arg in getfuncargs(tree):
                # all arguments must be plain symbols
                if not isvalidsymbol(arg):
                    return (decl, None, None, _("invalid argument list"))
                args.append(getsymbol(arg))
            if len(args) != len(set(args)):
                return (name, None, None,
                        _("argument names collide with each other"))
            return (name, ('func', ('symbol', name)), args, None)

        return (decl, None, None, _("invalid format"))
    except error.ParseError, inst:
        return (decl, None, None, parseerrordetail(inst))
2407 2414
def _parsealiasdefn(defn, args):
    """Parse alias definition ``defn``

    This function also replaces alias argument references in the
    specified definition by ``_aliasarg(ARGNAME)``.

    ``args`` is a list of alias argument names, or None if the alias
    is declared as a symbol.

    This returns "tree" as parsing result.

    >>> args = ['$1', '$2', 'foo']
    >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$1'))
      (func
        ('symbol', '_aliasarg')
        ('string', 'foo')))
    >>> try:
    ...     _parsealiasdefn('$1 or $bar', args)
    ... except error.ParseError, inst:
    ...     print parseerrordetail(inst)
    at 6: '$' not for alias arguments
    >>> args = ['$1', '$10', 'foo']
    >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$10'))
      ('symbol', 'foobar'))
    >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
    (or
      ('string', '$1')
      ('string', 'foo'))
    """
    def tokenizedefn(program, lookup=None):
        if args:
            argset = set(args)
        else:
            argset = set()

        for t, value, pos in _tokenizealias(program, lookup=lookup):
            if t == 'symbol':
                if value in argset:
                    # emulate tokenization of "_aliasarg('ARGNAME')":
                    # "_aliasarg()" is an unknown symbol only used separate
                    # alias argument placeholders from regular strings.
                    yield ('symbol', '_aliasarg', pos)
                    yield ('(', None, pos)
                    yield ('string', value, pos)
                    yield (')', None, pos)
                    continue
                elif value.startswith('$'):
                    # '$'-initial symbols other than declared arguments
                    # are rejected
                    raise error.ParseError(_("'$' not for alias arguments"),
                                           pos)
            yield (t, value, pos)

    p = parser.parser(tokenizedefn, elements)
    tree, pos = p.parse(defn)
    if pos != len(defn):
        raise error.ParseError(_('invalid token'), pos)
    return parser.simplifyinfixops(tree, ('or',))
2472 2479
class revsetalias(object):
    # whether own `error` information is already shown or not.
    # this avoids showing same warning multiple times at each `findaliases`.
    warned = False

    def __init__(self, name, value):
        '''Aliases like:

        h = heads(default)
        b($1) = ancestors($1) - ancestors(default)
        '''
        # declaration side: alias name and (optional) argument names
        self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
        if self.error:
            self.error = _('failed to parse the declaration of revset alias'
                           ' "%s": %s') % (self.name, self.error)
            return

        try:
            # definition side: parse tree with _aliasarg() placeholders
            self.replacement = _parsealiasdefn(value, self.args)
            # Check for placeholder injection
            _checkaliasarg(self.replacement, self.args)
        except error.ParseError, inst:
            self.error = _('failed to parse the definition of revset alias'
                           ' "%s": %s') % (self.name, parseerrordetail(inst))
2497 2504
def _getalias(aliases, tree):
    """Return the registered alias that ``tree`` is an unexpanded
    reference to, or None when ``tree`` is not an alias reference (or no
    matching alias exists).
    """
    if not (isinstance(tree, tuple) and tree):
        return None
    kind = tree[0]
    if kind == 'symbol' and len(tree) == 2:
        # symbol-style alias: 'name'
        found = aliases.get(tree[1])
        if found and found.args is None and found.tree == tree:
            return found
    if kind == 'func' and len(tree) > 1:
        # function-style alias: 'name(...)'
        target = tree[1]
        if target[0] == 'symbol' and len(target) == 2:
            found = aliases.get(target[1])
            if found and found.args is not None and found.tree == tree[:2]:
                return found
    return None
2515 2522
def _expandargs(tree, args):
    """Recursively substitute every _aliasarg placeholder in ``tree``
    with the value registered under the same name in ``args``.
    """
    if not isinstance(tree, tuple) or not tree:
        return tree
    name = _getaliasarg(tree)
    if name is None:
        return tuple(_expandargs(subtree, args) for subtree in tree)
    return args[name]
2526 2533
def _expandaliases(aliases, tree, expanding, cache):
    """Expand aliases in tree, recursively.

    'aliases' is a dictionary mapping user defined aliases to
    revsetalias objects.

    'expanding' is the chain of aliases currently being expanded (used
    to detect cycles); 'cache' memoizes expanded replacement trees by
    alias name.
    """
    if not isinstance(tree, tuple):
        # Do not expand raw strings
        return tree
    alias = _getalias(aliases, tree)
    if alias is not None:
        if alias.error:
            raise util.Abort(alias.error)
        if alias in expanding:
            # seeing the same alias again while expanding it means a cycle
            raise error.ParseError(_('infinite expansion of revset alias "%s" '
                                     'detected') % alias.name)
        expanding.append(alias)
        if alias.name not in cache:
            cache[alias.name] = _expandaliases(aliases, alias.replacement,
                                               expanding, cache)
        result = cache[alias.name]
        expanding.pop()
        if alias.args is not None:
            # expand each actual argument first, then substitute them
            # into the alias body's _aliasarg placeholders
            l = getlist(tree[2])
            if len(l) != len(alias.args):
                raise error.ParseError(
                    _('invalid number of arguments: %s') % len(l))
            l = [_expandaliases(aliases, a, [], cache) for a in l]
            result = _expandargs(result, dict(zip(alias.args, l)))
    else:
        # not an alias reference: recurse into the children
        result = tuple(_expandaliases(aliases, t, expanding, cache)
                       for t in tree)
    return result
2560 2567
def findaliases(ui, tree, showwarning=None):
    """Expand user-configured [revsetalias] aliases in ``tree``.

    When ``showwarning`` is provided, a warning is emitted once for each
    broken alias definition, whether or not it was referenced.
    """
    _checkaliasarg(tree)
    aliases = {}
    for declname, value in ui.configitems('revsetalias'):
        entry = revsetalias(declname, value)
        aliases[entry.name] = entry
    expanded = _expandaliases(aliases, tree, [], {})
    if showwarning:
        # warn about problematic (but not referred) aliases
        for name, entry in sorted(aliases.iteritems()):
            if entry.error and not entry.warned:
                showwarning(_('warning: %s\n') % (entry.error))
                entry.warned = True
    return expanded
2575 2582
def foldconcat(tree):
    """Fold elements to be concatenated by `##`
    """
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return tree
    if tree[0] != '_concat':
        return tuple(foldconcat(t) for t in tree)
    # iteratively flatten (possibly nested) _concat nodes, left to right
    parts = []
    stack = [tree]
    while stack:
        item = stack.pop()
        if item[0] == '_concat':
            stack.extend(reversed(item[1:]))
        elif item[0] in ('string', 'symbol'):
            parts.append(item[1])
        else:
            msg = _("\"##\" can't concatenate \"%s\" element") % (item[0])
            raise error.ParseError(msg)
    return ('string', ''.join(parts))
2596 2603
def parse(spec, lookup=None):
    """Parse the revset specification ``spec`` into a parse tree.

    ``lookup`` is an optional callable handed to the tokenizer to decide
    whether a name refers to an existing revision.
    """
    revparser = parser.parser(tokenize, elements)
    tree, pos = revparser.parse(spec, lookup=lookup)
    if pos != len(spec):
        raise error.ParseError(_("invalid token"), pos)
    return parser.simplifyinfixops(tree, ('or',))
2603 2610
def posttreebuilthook(tree, repo):
    # hook for extensions to execute code on the optimized tree
    # (no-op by default; intended to be wrapped or replaced by extensions)
    pass
2607 2614
def match(ui, spec, repo=None):
    """Compile revset ``spec`` into a matcher function.

    The result is a function taking ``(repo, subset=None)`` that
    evaluates the parsed, alias-expanded, folded and optimized tree
    against ``subset`` (the full repository when no subset is given).

    Raises ParseError for an empty or syntactically invalid ``spec``.
    """
    if not spec:
        raise error.ParseError(_("empty query"))
    lookup = None
    if repo:
        # let the tokenizer resolve names against the local repository
        lookup = repo.__contains__
    tree = parse(spec, lookup)
    if ui:
        tree = findaliases(ui, tree, showwarning=ui.warn)
    tree = foldconcat(tree)
    weight, tree = optimize(tree, True)
    posttreebuilthook(tree, repo)
    def mfunc(repo, subset=None):
        if subset is None:
            subset = fullreposet(repo)
        if util.safehasattr(subset, 'isascending'):
            # 'subset' already provides the smartset API
            result = getset(repo, subset, tree)
        else:
            # wrap plain collections so getset receives a smartset
            result = getset(repo, baseset(subset), tree)
        return result
    return mfunc
2629 2636
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    "_list('') or 1"
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    '''

    def quote(s):
        # single-quote with escaping via repr()
        return repr(str(s))

    def argtype(c, arg):
        # render a single argument according to its format character
        if c == 'd':
            return str(int(arg))
        if c == 's':
            return quote(arg)
        if c == 'r':
            parse(arg) # make sure syntax errors are confined
            return '(%s)' % arg
        if c == 'n':
            return quote(node.hex(arg))
        if c == 'b':
            return quote(arg.branch())

    def listexp(s, t):
        # render a list of arguments, splitting recursively so very long
        # lists become a balanced tree of 'or' expressions
        count = len(s)
        if count == 0:
            return "_list('')"
        if count == 1:
            return argtype(t, s[0])
        if t == 'd':
            return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
        if t == 's':
            return "_list('%s')" % "\0".join(s)
        if t == 'n':
            return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
        if t == 'b':
            return "_list('%s')" % "\0".join(a.branch() for a in s)

        mid = count // 2
        return '(%s or %s)' % (listexp(s[:mid], t), listexp(s[mid:], t))

    pieces = []
    pos = 0
    argindex = 0
    while pos < len(expr):
        c = expr[pos]
        if c == '%':
            pos += 1
            d = expr[pos]
            if d == '%':
                pieces.append(d)
            elif d in 'dsnbr':
                pieces.append(argtype(d, args[argindex]))
                argindex += 1
            elif d == 'l':
                # a list of some type
                pos += 1
                d = expr[pos]
                pieces.append(listexp(list(args[argindex]), d))
                argindex += 1
            else:
                raise util.Abort('unexpected revspec format character %s' % d)
        else:
            pieces.append(c)
        pos += 1

    return ''.join(pieces)
2723 2730
def prettyformat(tree):
    """Render a parsed revset tree as readable, indented text."""
    leaftypes = ('string', 'symbol')
    return parser.prettyformat(tree, leaftypes)
2726 2733
def depth(tree):
    """Return the nesting depth of a parse tree; non-tuples count as 0."""
    if not isinstance(tree, tuple):
        return 0
    return 1 + max(depth(subtree) for subtree in tree)
2732 2739
def funcsused(tree):
    """Return the set of function names referenced anywhere in `tree`."""
    # leaves carry no function references
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return set()
    funcs = set()
    for subtree in tree[1:]:
        funcs.update(funcsused(subtree))
    if tree[0] == 'func':
        funcs.add(tree[1][1])
    return funcs
2743 2750
class abstractsmartset(object):
    """Abstract base describing the smartset API.

    Subclasses must supply membership, iteration, length and ordering
    primitives; generic helpers (min/max, set algebra, filter) are built
    on top of those here.
    """

    def __nonzero__(self):
        """True if the smartset is not empty"""
        raise NotImplementedError()

    def __contains__(self, rev):
        """provide fast membership testing"""
        raise NotImplementedError()

    def __iter__(self):
        """iterate the set in the order it is supposed to be iterated"""
        raise NotImplementedError()

    # Attributes containing a function to perform a fast iteration in a given
    # direction. A smartset can have none, one, or both defined.
    #
    # Default value is None instead of a function returning None to avoid
    # initializing an iterator just for testing if a fast method exists.
    fastasc = None
    fastdesc = None

    def isascending(self):
        """True if the set will iterate in ascending order"""
        raise NotImplementedError()

    def isdescending(self):
        """True if the set will iterate in descending order"""
        raise NotImplementedError()

    def min(self):
        """return the minimum element in the set"""
        if self.fastasc is None:
            return min(self)
        for rev in self.fastasc():
            return rev
        raise ValueError('arg is an empty sequence')

    def max(self):
        """return the maximum element in the set"""
        if self.fastdesc is None:
            return max(self)
        for rev in self.fastdesc():
            return rev
        raise ValueError('arg is an empty sequence')

    def first(self):
        """return the first element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def last(self):
        """return the last element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def __len__(self):
        """return the length of the smartsets

        This can be expensive on smartset that could be lazy otherwise."""
        raise NotImplementedError()

    def reverse(self):
        """reverse the expected iteration order"""
        raise NotImplementedError()

    def sort(self, reverse=True):
        """get the set to iterate in an ascending or descending order"""
        raise NotImplementedError()

    def __and__(self, other):
        """Returns a new object with the intersection of the two collections.

        This is part of the mandatory API for smartset."""
        # intersecting with everything is a no-op
        if isinstance(other, fullreposet):
            return self
        return self.filter(other.__contains__, cache=False)

    def __add__(self, other):
        """Returns a new object with the union of the two collections.

        This is part of the mandatory API for smartset."""
        return addset(self, other)

    def __sub__(self, other):
        """Returns a new object with the substraction of the two collections.

        This is part of the mandatory API for smartset."""
        contained = other.__contains__
        return self.filter(lambda r: not contained(r), cache=False)

    def filter(self, condition, cache=True):
        """Returns this smartset filtered by condition as a new smartset.

        `condition` is a callable which takes a revision number and returns a
        boolean.

        This is part of the mandatory API for smartset."""
        # builtins cannot be cached, but they do not need to be
        if cache and util.safehasattr(condition, 'func_code'):
            condition = util.cachefunc(condition)
        return filteredset(self, condition)
2848 2855
class baseset(abstractsmartset):
    """Basic data structure that represents a revset and contains the basic
    operation that it should be able to perform.

    Every method in this class should be implemented by any smartset class.
    """
    def __init__(self, data=()):
        # own the data as a list; insertion order is preserved until a
        # sort direction is requested
        if not isinstance(data, list):
            data = list(data)
        self._list = data
        # None: unspecified order, True: ascending, False: descending
        self._ascending = None

    @util.propertycache
    def _set(self):
        # set view of the data, for O(1) membership tests
        return set(self._list)

    @util.propertycache
    def _asclist(self):
        # ascending copy of the data, for ordered iteration
        return sorted(self._list)

    def __iter__(self):
        if self._ascending is None:
            return iter(self._list)
        if self._ascending:
            return iter(self._asclist)
        return reversed(self._asclist)

    def fastasc(self):
        return iter(self._asclist)

    def fastdesc(self):
        return reversed(self._asclist)

    @util.propertycache
    def __contains__(self):
        # bind the set's membership test directly on the instance
        return self._set.__contains__

    def __nonzero__(self):
        return bool(self._list)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        if self._ascending is None:
            # no direction chosen yet: flip the concrete list in place
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def __len__(self):
        return len(self._list)

    def isascending(self):
        """Returns True if the collection is ascending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and self._ascending

    def isdescending(self):
        """Returns True if the collection is descending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and not self._ascending

    def first(self):
        if not self:
            return None
        if self._ascending is None:
            return self._list[0]
        if self._ascending:
            return self._asclist[0]
        return self._asclist[-1]

    def last(self):
        if not self:
            return None
        if self._ascending is None:
            return self._list[-1]
        if self._ascending:
            return self._asclist[-1]
        return self._asclist[0]

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r>' % (type(self).__name__, d, self._list)
2943 2950
class filteredset(abstractsmartset):
    """Duck type for baseset class which iterates lazily over the revisions in
    the subset and contains a function which tests for membership in the
    revset
    """
    def __init__(self, subset, condition=lambda x: True):
        """
        condition: a function that decide whether a revision in the subset
        belongs to the revset or not.
        """
        self._subset = subset
        self._condition = condition
        # memoizes condition results, keyed by revision
        self._cache = {}

    def __contains__(self, x):
        c = self._cache
        if x not in c:
            v = c[x] = x in self._subset and self._condition(x)
            return v
        return c[x]

    def __iter__(self):
        return self._iterfilter(self._subset)

    def _iterfilter(self, it):
        # yield only the revisions of `it` that pass the condition
        cond = self._condition
        for x in it:
            if cond(x):
                yield x

    @property
    def fastasc(self):
        it = self._subset.fastasc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    @property
    def fastdesc(self):
        it = self._subset.fastdesc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    def __nonzero__(self):
        for r in self:
            return True
        return False

    def __len__(self):
        # Basic implementation to be changed in future patches.
        l = baseset([r for r in self])
        return len(l)

    def sort(self, reverse=False):
        self._subset.sort(reverse=reverse)

    def reverse(self):
        self._subset.reverse()

    def isascending(self):
        return self._subset.isascending()

    def isdescending(self):
        return self._subset.isdescending()

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        """return the last element in iteration order, or None

        The last element is found by walking the underlying set in the
        *opposite* direction when a fast reverse iterator is available.
        """
        # Fix: the previous code tested the bound methods themselves
        # (`self._subset.isascending` without parentheses), which are always
        # truthy, and used fastdesc in both branches.  For a descending set
        # the last yielded element is the smallest, so fastasc is required.
        it = None
        if self._subset.isascending():
            it = self.fastdesc
        elif self._subset.isdescending():
            it = self.fastasc
        if it is None:
            # slowly consume everything. This needs improvement
            it = lambda: reversed(list(self))
        for x in it():
            return x
        return None

    def __repr__(self):
        return '<%s %r>' % (type(self).__name__, self._subset)
3030 3037
3031 3038 # this function will be removed, or merged to addset or orset, when
3032 3039 # - scmutil.revrange() can be rewritten to not combine calculated smartsets
3033 3040 # - or addset can handle more than two sets without balanced tree
def _combinesets(subsets):
    """Create balanced tree of addsets representing union of given sets"""
    if not subsets:
        return baseset()
    if len(subsets) == 1:
        return subsets[0]
    # split in the middle so the resulting addset tree stays balanced
    mid = len(subsets) // 2
    left = _combinesets(subsets[:mid])
    right = _combinesets(subsets[mid:])
    return addset(left, right)
3044 3051
3045 3052 def _iterordered(ascending, iter1, iter2):
3046 3053 """produce an ordered iteration from two iterators with the same order
3047 3054
3048 3055 The ascending is used to indicated the iteration direction.
3049 3056 """
3050 3057 choice = max
3051 3058 if ascending:
3052 3059 choice = min
3053 3060
3054 3061 val1 = None
3055 3062 val2 = None
3056 3063 try:
3057 3064 # Consume both iterators in an ordered way until one is empty
3058 3065 while True:
3059 3066 if val1 is None:
3060 3067 val1 = iter1.next()
3061 3068 if val2 is None:
3062 3069 val2 = iter2.next()
3063 3070 next = choice(val1, val2)
3064 3071 yield next
3065 3072 if val1 == next:
3066 3073 val1 = None
3067 3074 if val2 == next:
3068 3075 val2 = None
3069 3076 except StopIteration:
3070 3077 # Flush any remaining values and consume the other one
3071 3078 it = iter2
3072 3079 if val1 is not None:
3073 3080 yield val1
3074 3081 it = iter1
3075 3082 elif val2 is not None:
3076 3083 # might have been equality and both are empty
3077 3084 yield val2
3078 3085 for val in it:
3079 3086 yield val
3080 3087
class addset(abstractsmartset):
    """Represent the addition of two sets

    Wrapper structure for lazily adding two structures without losing much
    performance on the __contains__ method

    If the ascending attribute is set, that means the two structures are
    ordered in either an ascending or descending way. Therefore, we can add
    them maintaining the order by iterating over both at the same time

    >>> xs = baseset([0, 3, 2])
    >>> ys = baseset([5, 2, 4])

    >>> rs = addset(xs, ys)
    >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
    (True, True, False, True, 0, 4)
    >>> rs = addset(xs, baseset([]))
    >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
    (True, True, False, 0, 2)
    >>> rs = addset(baseset([]), baseset([]))
    >>> bool(rs), 0 in rs, rs.first(), rs.last()
    (False, False, None, None)

    iterate unsorted:
    >>> rs = addset(xs, ys)
    >>> [x for x in rs] # without _genlist
    [0, 3, 2, 5, 4]
    >>> assert not rs._genlist
    >>> len(rs)
    5
    >>> [x for x in rs] # with _genlist
    [0, 3, 2, 5, 4]
    >>> assert rs._genlist

    iterate ascending:
    >>> rs = addset(xs, ys, ascending=True)
    >>> [x for x in rs], [x for x in rs.fastasc()] # without _asclist
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastasc()]
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert rs._asclist

    iterate descending:
    >>> rs = addset(xs, ys, ascending=False)
    >>> [x for x in rs], [x for x in rs.fastdesc()] # without _asclist
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastdesc()]
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert rs._asclist

    iterate ascending without fastasc:
    >>> rs = addset(xs, generatorset(ys), ascending=True)
    >>> assert rs.fastasc is None
    >>> [x for x in rs]
    [0, 2, 3, 4, 5]

    iterate descending without fastdesc:
    >>> rs = addset(generatorset(xs), ys, ascending=False)
    >>> assert rs.fastdesc is None
    >>> [x for x in rs]
    [5, 4, 3, 2, 0]
    """
    def __init__(self, revs1, revs2, ascending=None):
        # the two wrapped smartsets
        self._r1 = revs1
        self._r2 = revs2
        # NOTE(review): _iter is assigned but not read by any method below;
        # presumably a leftover — confirm before removing
        self._iter = None
        # None: arbitrary order; True/False: both inputs are sorted that way
        self._ascending = ascending
        # lazily-built baseset of all yielded values (arbitrary order)
        self._genlist = None
        # lazily-built ascending list of all values, for ordered iteration
        self._asclist = None

    def __len__(self):
        # forces full materialization via the _list property cache
        return len(self._list)

    def __nonzero__(self):
        return bool(self._r1) or bool(self._r2)

    @util.propertycache
    def _list(self):
        if not self._genlist:
            self._genlist = baseset(iter(self))
        return self._genlist

    def __iter__(self):
        """Iterate over both collections without repeating elements

        If the ascending attribute is not set, iterate over the first one and
        then over the second one checking for membership on the first one so we
        dont yield any duplicates.

        If the ascending attribute is set, iterate over both collections at the
        same time, yielding only one value at a time in the given order.
        """
        if self._ascending is None:
            if self._genlist:
                return iter(self._genlist)
            def arbitraryordergen():
                # r1 first, then any r2 value not already seen in r1
                for r in self._r1:
                    yield r
                inr1 = self._r1.__contains__
                for r in self._r2:
                    if not inr1(r):
                        yield r
            return arbitraryordergen()
        # try to use our own fast iterator if it exists
        self._trysetasclist()
        if self._ascending:
            attr = 'fastasc'
        else:
            attr = 'fastdesc'
        it = getattr(self, attr)
        if it is not None:
            return it()
        # maybe half of the component supports fast
        # get iterator for _r1
        iter1 = getattr(self._r1, attr)
        if iter1 is None:
            # let's avoid side effect (not sure it matters)
            iter1 = iter(sorted(self._r1, reverse=not self._ascending))
        else:
            iter1 = iter1()
        # get iterator for _r2
        iter2 = getattr(self._r2, attr)
        if iter2 is None:
            # let's avoid side effect (not sure it matters)
            iter2 = iter(sorted(self._r2, reverse=not self._ascending))
        else:
            iter2 = iter2()
        return _iterordered(self._ascending, iter1, iter2)

    def _trysetasclist(self):
        """populate the _asclist attribute if possible and necessary"""
        # only possible once iteration has materialized _genlist
        if self._genlist is not None and self._asclist is None:
            self._asclist = sorted(self._genlist)

    @property
    def fastasc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__iter__
        # fast ordered iteration requires both sides to support it
        iter1 = self._r1.fastasc
        iter2 = self._r2.fastasc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(True, iter1(), iter2())

    @property
    def fastdesc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__reversed__
        # fast ordered iteration requires both sides to support it
        iter1 = self._r1.fastdesc
        iter2 = self._r2.fastdesc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(False, iter1(), iter2())

    def __contains__(self, x):
        return x in self._r1 or x in self._r2

    def sort(self, reverse=False):
        """Sort the added set

        For this we use the cached list with all the generated values and if we
        know they are ascending or descending we can sort them in a smart way.
        """
        self._ascending = not reverse

    def isascending(self):
        return self._ascending is not None and self._ascending

    def isdescending(self):
        return self._ascending is not None and not self._ascending

    def reverse(self):
        if self._ascending is None:
            # arbitrary order: reverse the materialized list in place
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        # reuse first() on the temporarily reversed iteration order
        self.reverse()
        val = self.first()
        self.reverse()
        return val

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3280 3287
class generatorset(abstractsmartset):
    """Wrap a generator for lazy iteration

    Wrapper structure for generators that provides lazy membership and can
    be iterated more than once.
    When asked for membership it generates values until either it finds the
    requested one or has gone through all the elements in the generator
    """
    def __init__(self, gen, iterasc=None):
        """
        gen: a generator producing the values for the generatorset.

        iterasc: if not None, the caller asserts the generator yields
        values in ascending (True) or descending (False) order, which
        enables a fast directional iterator and an early-exit
        membership test.
        """
        self._gen = gen
        # ascending list of all produced values; built once the generator
        # is exhausted (see _consumegen)
        self._asclist = None
        # membership cache: value -> bool
        self._cache = {}
        # values produced so far, in generator order
        self._genlist = []
        self._finished = False
        self._ascending = True
        if iterasc is not None:
            if iterasc:
                self.fastasc = self._iterator
                self.__contains__ = self._asccontains
            else:
                self.fastdesc = self._iterator
                self.__contains__ = self._desccontains

    def __nonzero__(self):
        # Do not use 'for r in self' because it will enforce the iteration
        # order (default ascending), possibly unrolling a whole descending
        # iterator.
        if self._genlist:
            return True
        for r in self._consumegen():
            return True
        return False

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def _asccontains(self, x):
        """version of contains optimised for ascending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l > x:
                # generator is ascending: x can no longer appear
                break

        self._cache[x] = False
        return False

    def _desccontains(self, x):
        """version of contains optimised for descending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l < x:
                # generator is descending: x can no longer appear
                break

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator
        for x in self._consumegen():
            pass
        # recall the same code
        return iter(self)

    def _iterator(self):
        # directional iterator installed as fastasc/fastdesc by __init__
        # when the caller vouched for the generator's order
        if self._finished:
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revision
        # removed from _consumegen and added to genlist in another instance.
        #
        # Getting rid of it would provide an about 15% speed up on this
        # iteration.
        genlist = self._genlist
        nextrev = self._consumegen().next
        _len = len # cache global lookup
        def gen():
            i = 0
            while True:
                if i < _len(genlist):
                    yield genlist[i]
                else:
                    yield nextrev()
                i += 1
        return gen()

    def _consumegen(self):
        # single point where the wrapped generator is advanced; records
        # every produced value in the membership cache and _genlist
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        if not self._finished:
            # generator exhausted: freeze the sorted view and switch the
            # fast iterators to the materialized list
            self._finished = True
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__

    def __len__(self):
        # requires full consumption of the generator
        for x in self._consumegen():
            pass
        return len(self._genlist)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        return next(it(), None)

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        return next(it(), None)

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s>' % (type(self).__name__, d)
3454 3461
class spanset(abstractsmartset):
    """Duck type for baseset class which represents a range of revisions and
    can work lazily and without having all the range in memory

    Note that spanset(x, y) behave almost like xrange(x, y) except for two
    notable points:
    - when x < y it will be automatically descending,
    - revision filtered with this repoview will be skipped.

    """
    def __init__(self, repo, start=0, end=None):
        """
        start: first revision included the set
            (default to 0)
        end: first revision excluded (last+1)
            (default to len(repo)

        Spanset will be descending if `end` < `start`.
        """
        if end is None:
            end = len(repo)
        self._ascending = start <= end
        # normalize to an ascending half-open range; the direction lives
        # in self._ascending
        if not self._ascending:
            start, end = end + 1, start + 1
        self._start = start
        self._end = end
        self._hiddenrevs = repo.changelog.filteredrevs

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def _iterfilter(self, iterrange):
        # drop revisions hidden by the current repoview
        hidden = self._hiddenrevs
        for rev in iterrange:
            if rev not in hidden:
                yield rev

    def __iter__(self):
        if self._ascending:
            return self.fastasc()
        return self.fastdesc()

    def fastasc(self):
        span = xrange(self._start, self._end)
        if self._hiddenrevs:
            return self._iterfilter(span)
        return iter(span)

    def fastdesc(self):
        span = xrange(self._end - 1, self._start - 1, -1)
        if self._hiddenrevs:
            return self._iterfilter(span)
        return iter(span)

    def __contains__(self, rev):
        if not (self._start <= rev < self._end):
            return False
        hidden = self._hiddenrevs
        return not (hidden and rev in hidden)

    def __nonzero__(self):
        for rev in self:
            return True
        return False

    def __len__(self):
        if not self._hiddenrevs:
            return abs(self._end - self._start)
        # subtract the hidden revisions that fall inside the span
        start = self._start
        end = self._end
        count = 0
        for rev in self._hiddenrevs:
            if (end < rev <= start) or (start <= rev < end):
                count += 1
        return abs(end - start) - count

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        for rev in it():
            return rev
        return None

    def last(self):
        # last in iteration order == first of the opposite direction
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        for rev in it():
            return rev
        return None

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s %d:%d>' % (type(self).__name__, d,
                                 self._start, self._end - 1)
3563 3570
class fullreposet(spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization and magic to handle virtual
    revisions such as "null".
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, all of the other set should also be
        in self. Therefore `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        # `other` is not a smartset: make it so
        if not util.safehasattr(other, 'isascending'):
            # filter out hidden revision
            # (this boldly assumes all smartset are pure)
            #
            # `other` was used with "&", let's assume this is a set like
            # object.
            other = baseset(other - self._hiddenrevs)

        # XXX As fullreposet is also used as bootstrap, this is wrong.
        #
        # With a giveme312() revset returning [3,1,2], this makes
        #   'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
        # We cannot just drop it because other usage still need to sort it:
        #   'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
        #
        # There is also some faulty revset implementations that rely on it
        # (eg: children as of its state in e8075329c5fb)
        #
        # When we fix the two points above we can move this into the if clause
        other.sort(reverse=self.isdescending())
        return other
3602 3609
def prettyformatset(revs):
    """Render repr(revs) with one '<...' fragment per line, indented by
    its nesting level (smartset reprs nest via angle brackets)."""
    rs = repr(revs)
    lines = []
    pos = 0
    while pos < len(rs):
        cut = rs.find('<', pos + 1)
        if cut < 0:
            cut = len(rs)
        # nesting level = unmatched '<' seen before this fragment
        level = rs.count('<', 0, pos) - rs.count('>', 0, pos)
        assert level >= 0
        lines.append((level, rs[pos:cut].rstrip()))
        pos = cut
    return '\n'.join(' ' * level + line for level, line in lines)
3616 3623
# tell hggettext to extract docstrings from these functions:
# (`symbols` is presumably the predicate-name -> function table defined
# earlier in this file — confirm when viewing the full module)
i18nfunctions = symbols.values()
@@ -1,109 +1,118 b''
1 1
2 2 $ cat << EOF > buggylocking.py
3 3 > """A small extension that acquire locks in the wrong order
4 4 > """
5 5 >
6 > from mercurial import cmdutil, repair
6 > from mercurial import cmdutil, repair, revset
7 7 >
8 8 > cmdtable = {}
9 9 > command = cmdutil.command(cmdtable)
10 10 >
11 11 > @command('buggylocking', [], '')
12 12 > def buggylocking(ui, repo):
13 13 > tr = repo.transaction('buggy')
14 14 > lo = repo.lock()
15 15 > wl = repo.wlock()
16 16 > wl.release()
17 17 > lo.release()
18 18 >
19 19 > @command('properlocking', [], '')
20 20 > def properlocking(ui, repo):
21 21 > """check that reentrance is fine"""
22 22 > wl = repo.wlock()
23 23 > lo = repo.lock()
24 24 > tr = repo.transaction('proper')
25 25 > tr2 = repo.transaction('proper')
26 26 > lo2 = repo.lock()
27 27 > wl2 = repo.wlock()
28 28 > wl2.release()
29 29 > lo2.release()
30 30 > tr2.close()
31 31 > tr.close()
32 32 > lo.release()
33 33 > wl.release()
34 34 >
35 35 > @command('nowaitlocking', [], '')
36 36 > def nowaitlocking(ui, repo):
37 37 > lo = repo.lock()
38 38 > wl = repo.wlock(wait=False)
39 39 > wl.release()
40 40 > lo.release()
41 41 >
42 42 > @command('stripintr', [], '')
43 43 > def stripintr(ui, repo):
44 44 > lo = repo.lock()
45 45 > tr = repo.transaction('foobar')
46 46 > try:
47 47 > repair.strip(repo.ui, repo, [repo['.'].node()])
48 48 > finally:
49 49 > lo.release()
50 >
51 > def oldstylerevset(repo, subset, x):
52 > return list(subset)
53 >
54 > revset.symbols['oldstyle'] = oldstylerevset
50 55 > EOF
51 56
52 57 $ cat << EOF >> $HGRCPATH
53 58 > [extensions]
54 59 > buggylocking=$TESTTMP/buggylocking.py
55 60 > [devel]
56 61 > all-warnings=1
57 62 > EOF
58 63
59 64 $ hg init lock-checker
60 65 $ cd lock-checker
61 66 $ hg buggylocking
62 67 devel-warn: transaction with no lock at: $TESTTMP/buggylocking.py:11 (buggylocking)
63 68 devel-warn: "wlock" acquired after "lock" at: $TESTTMP/buggylocking.py:13 (buggylocking)
64 69 $ cat << EOF >> $HGRCPATH
65 70 > [devel]
66 71 > all=0
67 72 > check-locks=1
68 73 > EOF
69 74 $ hg buggylocking
70 75 devel-warn: transaction with no lock at: $TESTTMP/buggylocking.py:11 (buggylocking)
71 76 devel-warn: "wlock" acquired after "lock" at: $TESTTMP/buggylocking.py:13 (buggylocking)
72 77 $ hg buggylocking --traceback
73 78 devel-warn: transaction with no lock at:
74 79 */hg:* in * (glob)
75 80 */mercurial/dispatch.py:* in run (glob)
76 81 */mercurial/dispatch.py:* in dispatch (glob)
77 82 */mercurial/dispatch.py:* in _runcatch (glob)
78 83 */mercurial/dispatch.py:* in _dispatch (glob)
79 84 */mercurial/dispatch.py:* in runcommand (glob)
80 85 */mercurial/dispatch.py:* in _runcommand (glob)
81 86 */mercurial/dispatch.py:* in checkargs (glob)
82 87 */mercurial/dispatch.py:* in <lambda> (glob)
83 88 */mercurial/util.py:* in check (glob)
84 89 $TESTTMP/buggylocking.py:* in buggylocking (glob)
85 90 devel-warn: "wlock" acquired after "lock" at:
86 91 */hg:* in * (glob)
87 92 */mercurial/dispatch.py:* in run (glob)
88 93 */mercurial/dispatch.py:* in dispatch (glob)
89 94 */mercurial/dispatch.py:* in _runcatch (glob)
90 95 */mercurial/dispatch.py:* in _dispatch (glob)
91 96 */mercurial/dispatch.py:* in runcommand (glob)
92 97 */mercurial/dispatch.py:* in _runcommand (glob)
93 98 */mercurial/dispatch.py:* in checkargs (glob)
94 99 */mercurial/dispatch.py:* in <lambda> (glob)
95 100 */mercurial/util.py:* in check (glob)
96 101 $TESTTMP/buggylocking.py:* in buggylocking (glob)
97 102 $ hg properlocking
98 103 $ hg nowaitlocking
99 104
100 105 $ echo a > a
101 106 $ hg add a
102 107 $ hg commit -m a
103 108 $ hg stripintr
104 109 saved backup bundle to $TESTTMP/lock-checker/.hg/strip-backup/cb9a9f314b8b-cc5ccb0b-backup.hg (glob)
105 110 abort: programming error: cannot strip from inside a transaction
106 111 (contact your extension maintainer)
107 112 [255]
108 113
114 $ hg log -r "oldstyle()" -T '{rev}\n'
115 devel-warn: revset "oldstyle" use list instead of smartset, (upgrade your code) at: */mercurial/revset.py:* (mfunc) (glob)
116 0
117
109 118 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now