# Source: Mercurial revset.py, changeset r24446:582cfcc8 (default branch)
# by Matt Harbison — "revset: add the 'subrepo' symbol".
# Extracted from a repository-browser page; original diff header was
# @@ -1,3359 +1,3403 @@.
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re
9 9 import parser, util, error, discovery, hbisect, phases
10 10 import node
11 11 import heapq
12 12 import match as matchmod
13 13 from i18n import _
14 14 import encoding
15 15 import obsolete as obsmod
16 16 import pathutil
17 17 import repoview
18 18
def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst."""
    # with followfirst, only walk the first parent of each changeset;
    # parentrevs(...)[:1] drops the second parent below
    if followfirst:
        cut = 1
    else:
        cut = None
    cl = repo.changelog

    def iterate():
        # Lazily yield ancestors in descending revision order.  A
        # max-heap (min-heap over negated revs) merges the input revs
        # with the parents discovered along the way.
        revqueue, revsnode = None, None
        h = []

        revs.sort(reverse=True)
        revqueue = util.deque(revs)
        if revqueue:
            revsnode = revqueue.popleft()
            heapq.heappush(h, -revsnode)

        seen = set()
        while h:
            current = -heapq.heappop(h)
            if current not in seen:
                # feed the next input rev into the heap once the
                # previous one has been reached
                if revsnode and current == revsnode:
                    if revqueue:
                        revsnode = revqueue.popleft()
                        heapq.heappush(h, -revsnode)
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)
52 52
def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst."""
    # with followfirst, a rev only counts as a descendant through its
    # first parent; parentrevs(...)[:1] drops the second parent below
    if followfirst:
        cut = 1
    else:
        cut = None

    def iterate():
        cl = repo.changelog
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            # single ascending sweep: a rev is a descendant as soon as
            # one of its (considered) parents is already in 'seen'
            seen = set(revs)
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)
79 79
def _revsbetween(repo, roots, heads):
    """Return all paths between roots and heads, inclusive of both endpoint
    sets."""
    if not roots:
        return baseset()
    parentrevs = repo.changelog.parentrevs
    visit = list(heads)
    reachable = set()
    seen = {}
    # nothing below the smallest root can lie on a path; prune there
    minroot = min(roots)
    roots = set(roots)
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = visit.pop()
        if rev in roots:
            reachable.add(rev)
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                visit.append(parent)
    if not reachable:
        return baseset()
    # second pass, ascending: a visited rev is on a path whenever one of
    # its recorded parents already is (parents sort before children)
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reachable.add(rev)
    return baseset(sorted(reachable))
109 109
# Token table for the expression parser (the 'parser' module imported
# above).  Each value is (binding strength, prefix spec, infix spec[,
# suffix spec]); None means the token cannot appear in that position.
elements = {
    "(": (21, ("group", 1, ")"), ("func", 1, ")")),
    "##": (20, None, ("_concat", 20)),
    "~": (18, None, ("ancestor", 18)),
    "^": (18, None, ("parent", 18), ("parentpost", 18)),
    "-": (5, ("negate", 19), ("minus", 5)),
    "::": (17, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    "..": (17, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
    "not": (10, ("not", 10)),
    "!": (10, ("not", 10)),
    "and": (5, None, ("and", 5)),
    "&": (5, None, ("and", 5)),
    "%": (5, None, ("only", 5), ("onlypost", 5)),
    "or": (4, None, ("or", 4)),
    "|": (4, None, ("or", 4)),
    "+": (4, None, ("or", 4)),
    ",": (2, None, ("list", 2)),
    ")": (0, None, None),
    "symbol": (0, ("symbol",), None),
    "string": (0, ("string",), None),
    "end": (0, None, None),
}
135 135
# bare words that are operators, never symbols
keywords = set(['and', 'or', 'not'])

# default set of valid characters for the initial letter of symbols
# (ord(c) > 127 admits every non-ASCII byte, so multi-byte encoded
# names pass through untouched)
_syminitletters = set(c for c in [chr(i) for i in xrange(256)]
                      if c.isalnum() or c in '._@' or ord(c) > 127)

# default set of valid characters for non-initial letters of symbols
_symletters = set(c for c in [chr(i) for i in xrange(256)]
                  if c.isalnum() or c in '-._/@' or ord(c) > 127)
145 145
def tokenize(program, lookup=None, syminitletters=None, symletters=None):
    '''
    Parse a revset statement into a stream of tokens

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
        symletters = _symletters

    # emitted tokens are (type, value, position) triples; position is
    # the offset of the token's first character in 'program'
    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            if c == 'r':
                # r-prefixed (raw) string: no escape processing
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = lambda x: x.decode('string-escape')
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                # while-else: loop ran off the end without a closing quote
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c in syminitletters:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if d not in symletters:
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                        s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            # compensate for the unconditional pos += 1 below: the
            # symbol scan already advanced past the last symbol char
            pos -= 1
        else:
            raise error.ParseError(_("syntax error"), pos)
        pos += 1
    yield ('end', None, pos)
247 247
def parseerrordetail(inst):
    """Compose a human-readable message from a ParseError instance."""
    args = inst.args
    if len(args) <= 1:
        return args[0]
    # second arg is the offending position within the revset string
    return _('at %s: %s') % (args[1], args[0])
255 255
256 256 # helpers
257 257
def getstring(x, err):
    """Extract the value of a 'string' or 'symbol' parse node.

    Any other node (including a missing one) raises ParseError(err).
    """
    if x:
        kind = x[0]
        if kind == 'string' or kind == 'symbol':
            return x[1]
    raise error.ParseError(err)
262 262
def getlist(x):
    """Flatten left-nested 'list' parse nodes into a python list."""
    items = []
    # 'list' nodes nest on the left: ('list', ('list', a, b), c)
    while x and x[0] == 'list':
        items.append(x[2])
        x = x[1]
    if x:
        items.append(x)
    items.reverse()
    return items
269 269
def getargs(x, min, max, err):
    """Return the argument list of a parsed call, checking its arity.

    Raises ParseError(err) when fewer than ``min`` or (if ``max`` is
    non-negative) more than ``max`` arguments are present.
    """
    # flatten left-nested 'list' nodes (same contract as getlist())
    args = []
    while x and x[0] == 'list':
        args.append(x[2])
        x = x[1]
    if x:
        args.append(x)
    args.reverse()
    if len(args) < min or (max >= 0 and len(args) > max):
        raise error.ParseError(err)
    return args
275 275
def isvalidsymbol(tree):
    """Report whether ``tree`` is a well-formed ``symbol`` node."""
    if tree[0] != 'symbol':
        return False
    return len(tree) > 1
280 280
def getsymbol(tree):
    """Return the name carried by a valid ``symbol`` node.

    ``tree`` must already have passed ``isvalidsymbol``.
    """
    return tree[1]
287 287
def isvalidfunc(tree):
    """Report whether ``tree`` is a well-formed ``func`` node."""
    if tree[0] != 'func' or len(tree) <= 1:
        return False
    # a valid call names its function with a ('symbol', name) child
    name = tree[1]
    return name[0] == 'symbol' and len(name) > 1
292 292
def getfuncname(tree):
    """Return the function name of a valid ``func`` node.

    ``tree`` must already have passed ``isvalidfunc``.
    """
    # first child of a 'func' node is its ('symbol', name) node
    return tree[1][1]
299 299
def getfuncargs(tree):
    """Return the argument list of a valid ``func`` node.

    ``tree`` must already have passed ``isvalidfunc``; a call without
    arguments yields an empty list.
    """
    if len(tree) <= 2:
        return []
    return getlist(tree[2])
309 309
def getset(repo, subset, x):
    # Evaluate parse tree 'x' against 'subset', dispatching on the node
    # type through the 'methods' table, and return a smartset.
    if not x:
        raise error.ParseError(_("missing argument"))
    s = methods[x[0]](repo, subset, *x[1:])
    if util.safehasattr(s, 'isascending'):
        # already a smartset-like object
        return s
    # wrap plain iterables so callers can rely on the smartset API
    return baseset(s)
317 317
318 318 def _getrevsource(repo, r):
319 319 extra = repo[r].extra()
320 320 for label in ('source', 'transplant_source', 'rebase_source'):
321 321 if label in extra:
322 322 try:
323 323 return repo[extra[label]].rev()
324 324 except error.RepoLookupError:
325 325 pass
326 326 return None
327 327
328 328 # operator methods
329 329
def stringset(repo, subset, x):
    """Resolve revision identifier 'x' and return it as a baseset.

    The result is empty when the resolved rev is not in 'subset'.
    """
    rev = repo[x].rev()
    if rev not in subset:
        return baseset()
    return baseset([rev])
335 335
def symbolset(repo, subset, x):
    # 'x' is a bare symbol: reject names reserved for predicates, then
    # fall back to resolving it as a plain revision identifier
    if x in symbols:
        raise error.ParseError(_("can't use %s here") % x)
    return stringset(repo, subset, x)
340 340
def rangeset(repo, subset, x, y):
    # 'x:y' — evaluate both endpoints against the whole repo, then span
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    # the range runs from the first rev of the left operand to the last
    # rev of the right one
    m, n = m.first(), n.last()

    if m < n:
        r = spanset(repo, m, n + 1)
    else:
        # descending range such as 'tip:0'; the span end is exclusive,
        # hence n - 1
        r = spanset(repo, m, n - 1)
    return r & subset
354 354
def dagrange(repo, subset, x, y):
    """Evaluate 'x::y': revs that lie on a path from 'x' down to 'y'."""
    everything = fullreposet(repo)
    roots = getset(repo, everything, x)
    heads = getset(repo, everything, y)
    return _revsbetween(repo, roots, heads) & subset
359 359
def andset(repo, subset, x, y):
    # intersection: evaluate 'y' within the result of evaluating 'x'
    return getset(repo, getset(repo, subset, x), y)
362 362
def orset(repo, subset, x, y):
    """Union: 'y' is only evaluated over what 'x' did not select."""
    left = getset(repo, subset, x)
    right = getset(repo, subset - left, y)
    return left + right
367 367
def notset(repo, subset, x):
    # complement of 'x' within 'subset'
    return subset - getset(repo, subset, x)
370 370
def listset(repo, subset, a, b):
    # bare 'a,b' lists are only meaningful as function arguments
    raise error.ParseError(_("can't use a list in this context"))
373 373
def func(repo, subset, a, b):
    # 'a' is the ('symbol', name) node of the call, 'b' its argument tree
    if a[0] == 'symbol' and a[1] in symbols:
        return symbols[a[1]](repo, subset, b)
    raise error.UnknownIdentifier(a[1], symbols.keys())
378 378
379 379 # functions
380 380
def adds(repo, subset, x):
    """``adds(pattern)``
    Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
    # field 1 of the repo.status() tuple is matched by checkstatus()
    return checkstatus(repo, subset, pat, 1)
392 392
def ancestor(repo, subset, x):
    """``ancestor(*changeset)``
    A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # (getset(repo, rl, i) for i in l) generates a list of lists
    for revs in (getset(repo, rl, i) for i in l):
        for r in revs:
            if anc is None:
                anc = repo[r]
            else:
                # fold pairwise: gca of the accumulator and the next rev
                anc = anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()
417 417
def _ancestors(repo, subset, x, followfirst=False):
    """Shared implementation of ancestors() and _firstancestors()."""
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    ancestorset = _revancestors(repo, heads, followfirst)
    return subset & ancestorset
424 424
def ancestors(repo, subset, x):
    """``ancestors(set)``
    Changesets that are ancestors of a changeset in set.
    """
    # the walk includes the revs of 'set' themselves
    return _ancestors(repo, subset, x)
430 430
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    # (internal helper predicate; underscore-prefixed)
    return _ancestors(repo, subset, x, followfirst=True)
435 435
def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    cl = repo.changelog
    found = set()
    for rev in getset(repo, fullreposet(repo), x):
        # walk n steps back along first parents only
        for _step in range(n):
            rev = cl.parentrevs(rev)[0]
        found.add(rev)
    return subset & found
452 452
def author(repo, subset, x):
    """``author(string)``
    Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    n = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(n)
    # matching is case-insensitive: both the pattern (above) and each
    # commit's user field are lowercased before comparison
    return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
461 461
def bisect(repo, subset, x):
    """``bisect(string)``
    Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads`` : csets topologically good/bad
    - ``range`` : csets taking part in the bisection
    - ``pruned`` : csets that are goods, bads or skipped
    - ``untested`` : csets whose fate is yet unknown
    - ``ignored`` : csets ignored due to DAG topology
    - ``current`` : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    # hbisect.get() resolves the status name to the matching revisions
    state = set(hbisect.get(repo, status))
    return subset & state
478 478
479 479 # Backward-compatibility
480 480 # - no help entry so that we do not advertise it any more
def bisected(repo, subset, x):
    # deprecated alias for bisect(); intentionally left undocumented so
    # it no longer shows up in help (no docstring on purpose)
    return bisect(repo, subset, x)
483 483
def bookmark(repo, subset, x):
    """``bookmark([name])``
    The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = _stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            # direct lookup; a missing bookmark is a hard error
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % bm)
            bms.add(repo[bmrev].rev())
        else:
            # pattern form (e.g. 're:'): scan every bookmark name
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        # no argument: every bookmarked revision
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    # drop nullrev -- presumably a bookmark can point there (e.g. in an
    # empty repo) and must never be returned; confirm before changing
    bms -= set([node.nullrev])
    return subset & bms
521 521
def branch(repo, subset, x):
    """``branch(string or set)``
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # getbi(rev) returns the cached branch info; element [0] is the name
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = _stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]))
        else:
            return subset.filter(lambda r: matcher(getbi(r)[0]))

    # argument is a revset: collect the branch names of those revs, then
    # select every changeset on any of them (plus the revs themselves)
    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b)
554 554
def bumped(repo, subset, x):
    """``bumped()``
    Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    # the 'bumped' set is computed by the obsolescence module
    bumped = obsmod.getrevs(repo, 'bumped')
    return subset & bumped
565 565
def bundle(repo, subset, x):
    """``bundle()``
    Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        # a plain changelog has no 'bundlerevs' attribute, meaning we
        # are not operating on a bundle repository
        raise util.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs
577 577
def checkstatus(repo, subset, pat, field):
    """Filter 'subset' to csets whose status list 'field' matches 'pat'.

    ``field`` indexes the tuple returned by repo.status(); ``adds()``
    passes 1 -- presumably other indexes select the other file lists.
    """
    hasset = matchmod.patkind(pat) == 'set'

    mcache = [None]
    def matches(x):
        c = repo[x]
        # fileset patterns ('set:...') depend on the changeset, so the
        # matcher cannot be reused across revisions in that case
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            # fast path: pattern is a single literal file name
            fname = m.files()[0]
        if fname is not None:
            # cheap pre-filter before running the expensive status call
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches)
609 609
def _children(repo, narrow, parentset):
    """Return members of 'narrow' having a parent in 'parentset'."""
    if not parentset:
        return baseset(set())
    parentrevs = repo.changelog.parentrevs
    minrev = min(parentset)
    found = set()
    for rev in narrow:
        # a child necessarily comes after the smallest candidate parent
        if rev > minrev:
            for p in parentrevs(rev):
                if p in parentset:
                    found.add(rev)
    return baseset(found)
623 623
def children(repo, subset, x):
    """``children(set)``
    Child changesets of changesets in set.
    """
    # parents may lie outside 'subset', so evaluate them repo-wide
    s = getset(repo, fullreposet(repo), x)
    cs = _children(repo, subset, s)
    return subset & cs
631 631
def closed(repo, subset, x):
    """``closed()``
    Changeset is closed.
    """
    # i18n: "closed" is a keyword
    # getargs() is used purely to enforce the zero-argument arity
    getargs(x, 0, 0, _("closed takes no arguments"))
    return subset.filter(lambda r: repo[r].closesbranch())
639 639
def contains(repo, subset, x):
    """``contains(pattern)``
    The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            # kind-less pattern: a cheap literal membership test against
            # the changeset's manifest
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            # real pattern: walk the whole manifest of the revision
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches)
666 666
def converted(repo, subset, x):
    """``converted([id])``
    Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # The old-repo identifier cannot be resolved against this repo, so a
    # plain string-prefix comparison is the best we can do.

    # i18n: "converted" is a keyword
    args = getargs(x, 0, 1, _('converted takes one or no arguments'))
    wanted = None
    if args:
        # i18n: "converted" is a keyword
        wanted = getstring(args[0], _('converted requires a revision'))

    def _isconverted(r):
        origin = repo[r].extra().get('convert_revision', None)
        if origin is None:
            return False
        return wanted is None or origin.startswith(wanted)

    return subset.filter(_isconverted)
688 688
def date(repo, subset, x):
    """``date(interval)``
    Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    # matchdate builds a predicate over unix timestamps; date()[0] is
    # the timestamp component of a changeset's date pair
    dm = util.matchdate(ds)
    return subset.filter(lambda x: dm(repo[x].date()[0]))
697 697
def desc(repo, subset, x):
    """``desc(string)``
    Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    needle = encoding.lower(getstring(x, _("desc requires a string")))

    def containsneedle(r):
        # lowercase both sides for the case-insensitive comparison
        return needle in encoding.lower(repo[r].description())

    return subset.filter(containsneedle)
710 710
def _descendants(repo, subset, x, followfirst=False):
    # Shared implementation of descendants() and _firstdescendants().
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        # unordered subset: intersect again to restore its ordering
        result = subset & result
    return result
729 729
def descendants(repo, subset, x):
    """``descendants(set)``
    Changesets which are descendants of changesets in set.
    """
    # the walk includes the revs of 'set' themselves
    return _descendants(repo, subset, x)
735 735
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    # (internal helper predicate; underscore-prefixed)
    return _descendants(repo, subset, x, followfirst=True)
740 740
def destination(repo, subset, x):
    """``destination([set])``
    Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source.  Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be.  Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        # lineage collects the chain of dests visited while walking back
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set.  Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset.  Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__)
784 784
def divergent(repo, subset, x):
    """``divergent()``
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    # the 'divergent' set is computed by the obsolescence module
    divergent = obsmod.getrevs(repo, 'divergent')
    return subset & divergent
793 793
def draft(repo, subset, x):
    """``draft()``
    Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    phase = repo._phasecache.phase
    target = phases.draft
    condition = lambda r: phase(repo, r) == target
    # cache=False: presumably because phases can move while the repo is
    # open, so the filter result must not be memoized -- confirm
    return subset.filter(condition, cache=False)
803 803
def extinct(repo, subset, x):
    """``extinct()``
    Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    # the 'extinct' set is computed by the obsolescence module
    extincts = obsmod.getrevs(repo, 'extinct')
    return subset & extincts
812 812
def extra(repo, subset, x):
    """``extra(label, [value])``
    Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """

    # i18n: "extra" is a keyword
    l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
    # i18n: "extra" is a keyword
    label = getstring(l[0], _('first argument to extra must be a string'))
    value = None

    if len(l) > 1:
        # i18n: "extra" is a keyword
        value = getstring(l[1], _('second argument to extra must be a string'))
        kind, value, matcher = _stringmatcher(value)

    def _matchvalue(r):
        # with no value argument, mere presence of the label matches
        extra = repo[r].extra()
        return label in extra and (value is None or matcher(extra[label]))

    return subset.filter(lambda r: _matchvalue(r))
839 839
def filelog(repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set() # changerevs connected to the matched filelog(s)
    cl = repo.changelog

    if not matchmod.patkind(pat):
        # kind-less pattern: a single literal path
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        backrevref = {} # final value for: filerev -> changerev
        lowestchild = {} # lowest known filerev child of a filerev
        delayed = [] # filerev with filtered linkrev, for post-processing
        lowesthead = None # cache for manifest content of all head revisions
        fl = repo.file(f)
        for fr in list(fl):
            rev = fl.linkrev(fr)
            if rev not in cl:
                # changerev pointed in linkrev is filtered
                # record it for post processing.
                delayed.append((fr, rev))
                continue
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

        # Post-processing of all filerevs we skipped because they were
        # filtered. If such filerevs have known and unfiltered children, this
        # means they have an unfiltered appearance out there. We'll use linkrev
        # adjustment to find one of these appearances. The lowest known child
        # will be used as a starting point because it is the best upper-bound we
        # have.
        #
        # This approach will fail when an unfiltered but linkrev-shadowed
        # appearance exists in a head changeset without unfiltered filerev
        # children anywhere.
        while delayed:
            # must be a descending iteration. To slowly fill lowest child
            # information that is of potential use by the next item.
            fr, rev = delayed.pop()
            lkr = rev

            child = lowestchild.get(fr)

            if child is None:
                # search for existence of this file revision in a head revision.
                # There are three possibilities:
                # - the revision exists in a head and we can find an
                #   introduction from there,
                # - the revision does not exist in a head because it has been
                #   changed since its introduction: we would have found a child
                #   and be in the other 'else' clause,
                # - all versions of the revision are hidden.
                if lowesthead is None:
                    lowesthead = {}
                    for h in repo.heads():
                        fnode = repo[h].manifest().get(f)
                        if fnode is not None:
                            lowesthead[fl.rev(fnode)] = h
                headrev = lowesthead.get(fr)
                if headrev is None:
                    # content is nowhere unfiltered
                    continue
                rev = repo[headrev][f].introrev()
            else:
                # the lowest known child is a good upper bound
                childcrev = backrevref[child]
                # XXX this does not guarantee returning the lowest
                # introduction of this revision, but this gives a
                # result which is a good start and will fit in most
                # cases. We probably need to fix the multiple
                # introductions case properly (report each
                # introduction, even for identical file revisions)
                # once and for all at some point anyway.
                for p in repo[childcrev][f].parents():
                    if p.filerev() == fr:
                        rev = p.rev()
                        break
                if rev == lkr: # no shadowed entry found
                    # XXX This should never happen unless some manifest points
                    # to biggish file revisions (like a revision that uses a
                    # parent that never appears in the manifest ancestors)
                    continue

            # Fill the data for the next iteration.
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

    return subset & s
954 954
def first(repo, subset, x):
    """``first(set, [n])``
    An alias for limit().
    """
    # identical semantics to limit(); kept as a separate predicate name
    return limit(repo, subset, x)
960 960
def _follow(repo, subset, x, name, followfirst=False):
    # Shared implementation behind follow() and _followfirst().  `name` is
    # only used in error messages; `followfirst` restricts ancestry walks
    # to first parents.
    l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
    c = repo['.']
    if l:
        # a filename was given: follow that file's history from '.'
        x = getstring(l[0], _("%s expected a filename") % name)
        if x in c:
            cx = c[x]
            s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
            # include the revision responsible for the most recent version
            s.add(cx.introrev())
        else:
            # file does not exist in the working parent: empty result
            return baseset()
    else:
        # no filename: all ancestors of the working directory parent
        s = _revancestors(repo, baseset([c.rev()]), followfirst)

    return subset & s
977 977
def follow(repo, subset, x):
    """``follow([file])``
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If a filename is specified, the history of the given file is followed,
    including copies.
    """
    # delegate to the shared helper; 'follow' is only used for error text
    return _follow(repo, subset, x, 'follow')
985 985
def _followfirst(repo, subset, x):
    # ``followfirst([file])``
    # Like ``follow([file])`` but follows only the first parent of
    # every revision or file revision.
    # NOTE: internal predicate (leading underscore); documented with
    # comments rather than a docstring.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
991 991
def getall(repo, subset, x):
    """``all()``
    All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    return subset & spanset(repo) # drop "null" if any
999 999
1000 1000 def grep(repo, subset, x):
1001 1001 """``grep(regex)``
1002 1002 Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1003 1003 to ensure special escape characters are handled correctly. Unlike
1004 1004 ``keyword(string)``, the match is case-sensitive.
1005 1005 """
1006 1006 try:
1007 1007 # i18n: "grep" is a keyword
1008 1008 gr = re.compile(getstring(x, _("grep requires a string")))
1009 1009 except re.error, e:
1010 1010 raise error.ParseError(_('invalid match pattern: %s') % e)
1011 1011
1012 1012 def matches(x):
1013 1013 c = repo[x]
1014 1014 for e in c.files() + [c.user(), c.description()]:
1015 1015 if gr.search(e):
1016 1016 return True
1017 1017 return False
1018 1018
1019 1019 return subset.filter(matches)
1020 1020
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    # i18n: "_matchfiles" is a keyword
    l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        # i18n: "_matchfiles" is a keyword
        s = getstring(arg, _("_matchfiles requires string arguments"))
        # each argument is a two-character prefix followed by its value
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'revision'))
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'default mode'))
            default = value
        else:
            # i18n: "_matchfiles" is a keyword
            raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    def matches(x):
        # a changeset qualifies if any of its changed files matches
        for f in repo[x].files():
            if m(f):
                return True
        return False

    return subset.filter(matches)
1077 1077
def hasfile(repo, subset, x):
    """``file(pattern)``
    Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    # delegate to _matchfiles with a single plain ('p:') pattern argument
    return _matchfiles(repo, subset, ('string', 'p:' + pat))
1090 1090
def head(repo, subset, x):
    """``head()``
    Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    headrevs = set()
    # gather the head revisions of every named branch
    for branch, nodes in repo.branchmap().iteritems():
        headrevs.update(repo[n].rev() for n in nodes)
    return baseset(headrevs).filter(subset.__contains__)
1101 1101
def heads(repo, subset, x):
    """``heads(set)``
    Members of set with no children in set.
    """
    s = getset(repo, subset, x)
    # members that are a parent of another member are not heads
    ps = parents(repo, subset, x)
    return s - ps
1109 1109
def hidden(repo, subset, x):
    """``hidden()``
    Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    # revisions filtered out of the 'visible' view are the hidden ones
    hiddenrevs = repoview.filterrevs(repo, 'visible')
    return subset & hiddenrevs
1118 1118
def keyword(repo, subset, x):
    """``keyword(string)``
    Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    needle = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(rev):
        ctx = repo[rev]
        # scan changed file names plus user and description, lowercased
        for text in ctx.files() + [ctx.user(), ctx.description()]:
            if needle in encoding.lower(text):
                return True
        return False

    return subset.filter(matches)
1133 1133
def limit(repo, subset, x):
    """``limit(set, [n])``
    First n members of set, defaulting to 1.
    """
    # i18n: "limit" is a keyword
    l = getargs(x, 1, 2, _("limit requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "limit" is a keyword
            lim = int(getstring(l[1], _("limit requires a number")))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    ss = subset
    # evaluate the inner set over the whole repo so that its own ordering
    # (not the subset's) determines which members come "first"
    os = getset(repo, fullreposet(repo), l[0])
    result = []
    it = iter(os)
    for x in xrange(lim):
        try:
            y = it.next()
            # only members also present in the outer subset are kept
            if y in ss:
                result.append(y)
        except (StopIteration):
            # inner set exhausted before the limit was reached
            break
    return baseset(result)
1160 1160
def last(repo, subset, x):
    """``last(set, [n])``
    Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
    except (TypeError, ValueError):
        # i18n: "last" is a keyword
        raise error.ParseError(_("last expects a number"))
    ss = subset
    os = getset(repo, fullreposet(repo), l[0])
    # walk the inner set from its end: same scheme as limit(), reversed
    os.reverse()
    result = []
    it = iter(os)
    for x in xrange(lim):
        try:
            y = it.next()
            if y in ss:
                result.append(y)
        except (StopIteration):
            break
    return baseset(result)
1188 1188
def maxrev(repo, subset, x):
    """``max(set)``
    Changeset with highest revision number in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    if candidates:
        top = candidates.max()
        if top in subset:
            return baseset([top])
    # empty input set, or its maximum is not part of `subset`
    return baseset()
1199 1199
def merge(repo, subset, x):
    """``merge()``
    Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    parentrevs = repo.changelog.parentrevs
    # a merge has a real (non -1) second parent
    return subset.filter(lambda r: parentrevs(r)[1] != -1)
1208 1208
def branchpoint(repo, subset, x):
    """``branchpoint()``
    Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # count children for every rev >= baserev in a flat array indexed by
    # (rev - baserev); only revs after baserev can be children of it
    baserev = min(subset)
    parentscount = [0]*(len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                parentscount[p - baserev] += 1
    return subset.filter(lambda r: parentscount[r - baserev] > 1)
1225 1225
def minrev(repo, subset, x):
    """``min(set)``
    Changeset with lowest revision number in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    if candidates:
        lowest = candidates.min()
        if lowest in subset:
            return baseset([lowest])
    # empty input set, or its minimum is not part of `subset`
    return baseset()
1236 1236
def modifies(repo, subset, x):
    """``modifies(pattern)``
    Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    # status field 0 holds the modified files
    return checkstatus(repo, subset, pat, 0)
1248 1248
def named(repo, subset, x):
    """``named(namespace)``
    The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = _stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        # exact namespace name: must exist
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        # pattern (e.g. 're:'): collect every matching namespace
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    # resolve every non-deprecated name in the selected namespaces to revs
    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    names -= set([node.nullrev])
    return subset & names
1286 1286
def node_(repo, subset, x):
    """``id(string)``
    Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    if len(n) == 40:
        # full 40-character hex hash: look it up directly
        rn = repo[n].rev()
    else:
        rn = None
        # shorter prefix: unambiguous partial match via the changelog
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    result = baseset([rn])
    return result & subset
1307 1307
def obsolete(repo, subset, x):
    """``obsolete()``
    Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    # the obsstore keeps a precomputed set of obsolete revisions
    obsoletes = obsmod.getrevs(repo, 'obsolete')
    return subset & obsoletes
1315 1315
def only(repo, subset, x):
    """``only(set, [set])``
    Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        # one-argument form: exclude every repo head that is neither in
        # `include` nor descended from it
        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
            if not rev in descendants and not rev in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    # ancestors of `include` that are not ancestors of `exclude`
    results = set(cl.findmissingrevs(common=exclude, heads=include))
    return subset & results
1339 1339
def origin(repo, subset, x):
    """``origin([set])``
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, fullreposet(repo), x)
    else:
        dests = fullreposet(repo)

    def _firstsrc(rev):
        # walk the recorded source chain back to the original source
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        while True:
            prev = _getrevsource(repo, src)

            if prev is None:
                return src
            src = prev

    o = set([_firstsrc(r) for r in dests])
    # revisions without a recorded source yielded None entries
    o -= set([None])
    return subset & o
1368 1368
def outgoing(repo, subset, x):
    """``outgoing([path])``
    Changesets not found in the specified destination repository, or the
    default push location.
    """
    import hg # avoid start-up nasties
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    # fall back to the configured default-push/default paths
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # silence discovery chatter; only the computed set is wanted
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = set([cl.rev(r) for r in outgoing.missing])
    return subset & o
1391 1391
def p1(repo, subset, x):
    """``p1([set])``
    First parent of changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        ps.add(cl.parentrevs(r)[0])
    # root changesets have nullrev as first parent; drop it
    ps -= set([node.nullrev])
    return subset & ps
1408 1408
def p2(repo, subset, x):
    """``p2([set])``
    Second parent of changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context
        ps = repo[x].parents()
        try:
            p = ps[1].rev()
            if p >= 0:
                return subset & baseset([p])
            return baseset()
        except IndexError:
            # the working directory has no second parent
            return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        ps.add(cl.parentrevs(r)[1])
    # non-merge changesets have nullrev as second parent; drop it
    ps -= set([node.nullrev])
    return subset & ps
1429 1429
def parents(repo, subset, x):
    """``parents([set])``
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context
        ps = set(p.rev() for p in repo[x].parents())
    else:
        ps = set()
        cl = repo.changelog
        for r in getset(repo, fullreposet(repo), x):
            ps.update(cl.parentrevs(r))
    # nullrev stands for "no parent"; it is not a real revision
    ps -= set([node.nullrev])
    return subset & ps
1443 1443
def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        # n arrives as a parse-tree token; its payload is at index 1
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            # ^0 is the revision itself
            ps.add(r)
        elif n == 1:
            ps.add(cl.parentrevs(r)[0])
        elif n == 2:
            parents = cl.parentrevs(r)
            if len(parents) > 1:
                ps.add(parents[1])
    return subset & ps
1468 1468
def present(repo, subset, x):
    """``present(set)``
    An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    # swallow lookup errors so unknown revisions degrade to an empty set
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        return baseset()
1482 1482
def public(repo, subset, x):
    """``public()``
    Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    getphase = repo._phasecache.phase
    # cache=False: phases may change, so the filter must not be memoized
    return subset.filter(lambda r: getphase(repo, r) == phases.public,
                         cache=False)
1492 1492
def remote(repo, subset, x):
    """``remote([id [,path]])``
    Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        # '.' means the current local branch name
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    n = other.lookup(q)
    if n in repo:
        # only an id that resolves to a locally-known node is returned
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
1527 1527
def removes(repo, subset, x):
    """``removes(pattern)``
    Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _("removes requires a pattern"))
    # status field 2 holds the removed files
    return checkstatus(repo, subset, pat, 2)
1539 1539
def rev(repo, subset, x):
    """``rev(number)``
    Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    l = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        l = int(getstring(l[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    # an unknown revision number yields an empty set, not an error
    if l not in repo.changelog and l != node.nullrev:
        return baseset()
    return subset & baseset([l])
1555 1555
def matching(repo, subset, x):
    """``matching(revision [, field])``
    Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
        'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True),)
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        # a candidate matches if it agrees with any rev in `revs` on
        # every selected field
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
            if match:
                return True
        return False

    return subset.filter(matches)
1667 1667
def reverse(repo, subset, x):
    """``reverse(set)``
    Reverse order of set.
    """
    # in-place reversal of the evaluated set
    result = getset(repo, subset, x)
    result.reverse()
    return result
1675 1675
def roots(repo, subset, x):
    """``roots(set)``
    Changesets in set with no parent changeset in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    inboth = baseset([r for r in candidates if r in subset])
    # a root is any member that is not the child of another member
    return inboth - _children(repo, inboth, candidates)
1684 1684
def secret(repo, subset, x):
    """``secret()``
    Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    getphase = repo._phasecache.phase
    # cache=False: phases may change, so the filter must not be memoized
    return subset.filter(lambda r: getphase(repo, r) == phases.secret,
                         cache=False)
1694 1694
def sort(repo, subset, x):
    """``sort(set[, [-]key...])``
    Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    """
    # i18n: "sort" is a keyword
    l = getargs(x, 1, 2, _("sort requires one or two arguments"))
    keys = "rev"
    if len(l) == 2:
        # i18n: "sort" is a keyword
        keys = getstring(l[1], _("sort spec must be a string"))

    s = l[0]
    keys = keys.split()
    l = []
    def invert(s):
        # byte-wise complement: an ascending sort of the inverted string
        # is a descending sort of the original
        return "".join(chr(255 - ord(c)) for c in s)
    revs = getset(repo, subset, s)
    if keys == ["rev"]:
        # fast paths for plain revision-number sorts
        revs.sort()
        return revs
    elif keys == ["-rev"]:
        revs.sort(reverse=True)
        return revs
    # general case: build a list of sort-key tuples, one per revision
    for r in revs:
        c = repo[r]
        e = []
        for k in keys:
            if k == 'rev':
                e.append(r)
            elif k == '-rev':
                e.append(-r)
            elif k == 'branch':
                e.append(c.branch())
            elif k == '-branch':
                e.append(invert(c.branch()))
            elif k == 'desc':
                e.append(c.description())
            elif k == '-desc':
                e.append(invert(c.description()))
            elif k in 'user author':
                # NOTE: substring test, so 'user' and 'author' both hit
                e.append(c.user())
            elif k in '-user -author':
                e.append(invert(c.user()))
            elif k == 'date':
                e.append(c.date()[0])
            elif k == '-date':
                e.append(-c.date()[0])
            else:
                raise error.ParseError(_("unknown sort key %r") % k)
        # the revision itself goes last: tiebreaker, and retrieved below
        e.append(r)
        l.append(e)
    l.sort()
    return baseset([e[-1] for e in l])
1757 1757
def subrepo(repo, subset, x):
    """``subrepo([pattern])``
    Changesets that add, modify or remove the given subrepo. If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    # only changesets that touch .hgsubstate can change subrepo state
    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        # yield subrepo names matching `pat` (literal or 're:' pattern);
        # only called when a pattern argument was given, so `pat` is bound
        k, p, m = _stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        # status of .hgsubstate between the first parent and the changeset
        s = repo.status(c.p1().node(), c.node(), match=m)

        if len(args) == 0:
            # no pattern: any .hgsubstate change qualifies
            return s.added or s.modified or s.removed

        if s.added:
            # .hgsubstate was added: every current subrepo is new here
            return util.any(submatches(c.substate.keys()))

        if s.modified:
            # compare subrepo state against the first parent's
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            # .hgsubstate was removed: every parent subrepo was dropped
            return util.any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches)
1800
def _stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = _stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    if pattern.startswith('re:'):
        pattern = pattern[3:]
        try:
            # compile eagerly so a bad regex surfaces as a parse error
            # instead of blowing up later, mid-evaluation
            regex = re.compile(pattern)
        except re.error, e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        # regex matchers search anywhere in the candidate string
        return 're', pattern, regex.search
    elif pattern.startswith('literal:'):
        pattern = pattern[8:]
    # literal matchers require full-string equality
    return 'literal', pattern, pattern.__eq__
1796 1839
def _substringmatcher(pattern):
    """Like _stringmatcher, but literal patterns match substrings."""
    kind, pattern, matcher = _stringmatcher(pattern)
    if kind != 'literal':
        return kind, pattern, matcher
    # downgrade the exact-equality matcher to a containment test
    return kind, pattern, lambda s: pattern in s
1802 1845
def tag(repo, subset, x):
    """``tag([name])``
    The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if args:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = _stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise error.RepoLookupError(_("tag '%s' does not exist")
                                            % pattern)
            s = set([repo[tn].rev()])
        else:
            # regex pattern: must scan the complete tag list
            s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
    else:
        # no argument: every tagged revision, minus the implicit 'tip' tag
        s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
    return subset & s
1831 1874
def tagged(repo, subset, x):
    # backward-compatible alias of tag()
    return tag(repo, subset, x)
1834 1877
def unstable(repo, subset, x):
    """``unstable()``
    Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    # obsolescence-related rev sets are computed by the obsolete module
    unstables = obsmod.getrevs(repo, 'unstable')
    return subset & unstables
1843 1886
1844 1887
def user(repo, subset, x):
    """``user(string)``
    User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    # alias of author()
    return author(repo, subset, x)
1854 1897
def wdir(repo, subset, x):
    """``wdir()``
    Working directory.
    """
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    # the working directory is represented by rev None; keep it only when
    # the incoming subset already contains it
    members = [None] if None in subset else []
    return baseset(members)
1864 1907
# for internal use
def _list(repo, subset, x):
    # expand a '\0'-separated list of revision identifiers, preserving the
    # listed order but dropping revs that are not in subset
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    ls = [repo[r].rev() for r in s.split('\0')]
    s = subset
    return baseset([r for r in ls if r in s])
1873 1916
# for internal use
def _intlist(repo, subset, x):
    # expand a '\0'-separated list of integer revs, preserving the listed
    # order but dropping revs that are not in subset
    spec = getstring(x, "internal error")
    if not spec:
        return baseset()
    wanted = [int(piece) for piece in spec.split('\0')]
    return baseset([r for r in wanted if r in subset])
1882 1925
# for internal use
def _hexlist(repo, subset, x):
    # like _list, but the entries are full hexadecimal node ids
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    cl = repo.changelog
    ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
    s = subset
    return baseset([r for r in ls if r in s])
1892 1935
# map of revset predicate names to their implementations; names starting
# with '_' are for internal use only and are not documented to users
symbols = {
    "adds": adds,
    "all": getall,
    "ancestor": ancestor,
    "ancestors": ancestors,
    "_firstancestors": _firstancestors,
    "author": author,
    "bisect": bisect,
    "bisected": bisected,
    "bookmark": bookmark,
    "branch": branch,
    "branchpoint": branchpoint,
    "bumped": bumped,
    "bundle": bundle,
    "children": children,
    "closed": closed,
    "contains": contains,
    "converted": converted,
    "date": date,
    "desc": desc,
    "descendants": descendants,
    "_firstdescendants": _firstdescendants,
    "destination": destination,
    "divergent": divergent,
    "draft": draft,
    "extinct": extinct,
    "extra": extra,
    "file": hasfile,
    "filelog": filelog,
    "first": first,
    "follow": follow,
    "_followfirst": _followfirst,
    "grep": grep,
    "head": head,
    "heads": heads,
    "hidden": hidden,
    "id": node_,
    "keyword": keyword,
    "last": last,
    "limit": limit,
    "_matchfiles": _matchfiles,
    "max": maxrev,
    "merge": merge,
    "min": minrev,
    "modifies": modifies,
    "named": named,
    "obsolete": obsolete,
    "only": only,
    "origin": origin,
    "outgoing": outgoing,
    "p1": p1,
    "p2": p2,
    "parents": parents,
    "present": present,
    "public": public,
    "remote": remote,
    "removes": removes,
    "rev": rev,
    "reverse": reverse,
    "roots": roots,
    "sort": sort,
    "secret": secret,
    "subrepo": subrepo,
    "matching": matching,
    "tag": tag,
    "tagged": tagged,
    "user": user,
    "unstable": unstable,
    "wdir": wdir,
    "_list": _list,
    "_intlist": _intlist,
    "_hexlist": _hexlist,
}
1965 2009
# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
# note: 'contains', 'grep' and the new 'subrepo' are deliberately absent --
# they accept patterns and/or do per-revision content work
safesymbols = set([
    "adds",
    "all",
    "ancestor",
    "ancestors",
    "_firstancestors",
    "author",
    "bisect",
    "bisected",
    "bookmark",
    "branch",
    "branchpoint",
    "bumped",
    "bundle",
    "children",
    "closed",
    "converted",
    "date",
    "desc",
    "descendants",
    "_firstdescendants",
    "destination",
    "divergent",
    "draft",
    "extinct",
    "extra",
    "file",
    "filelog",
    "first",
    "follow",
    "_followfirst",
    "head",
    "heads",
    "hidden",
    "id",
    "keyword",
    "last",
    "limit",
    "_matchfiles",
    "max",
    "merge",
    "min",
    "modifies",
    "obsolete",
    "only",
    "origin",
    "outgoing",
    "p1",
    "p2",
    "parents",
    "present",
    "public",
    "remote",
    "removes",
    "rev",
    "reverse",
    "roots",
    "sort",
    "secret",
    "matching",
    "tag",
    "tagged",
    "user",
    "unstable",
    "wdir",
    "_list",
    "_intlist",
    "_hexlist",
])
2038 2082
# map of parse-tree node types to the functions that evaluate them
methods = {
    "range": rangeset,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": symbolset,
    "and": andset,
    "or": orset,
    "not": notset,
    "list": listset,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": p1,
    "only": only,
    "onlypost": only,
}
2055 2099
def optimize(x, small):
    """Rewrite parse tree x for cheaper evaluation; return (weight, tree).

    The weight is a rough relative cost estimate of evaluating the
    (possibly rewritten) tree.  It is only used to reorder the operands
    of 'and' so that the cheaper side is computed first.  'small' hints
    that the expression is expected to match few revisions, which gives
    single-revision forms a discount (smallbonus).
    """
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    if op == 'minus':
        # 'a - b' is just 'a and not b'
        return optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'only':
        return optimize(('func', ('symbol', 'only'),
                         ('list', x[1], x[2])), small)
    elif op == 'dagrangepre':
        return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        return optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangepre':
        return optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        return optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        return optimize(('string',
                         '-' + getstring(x[1], _("can't negate that"))), small)
    elif op in 'string symbol negate':
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = optimize(x[1], True)
        wb, tb = optimize(x[2], True)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
        def isonly(revs, bases):
            return (
                revs[0] == 'func'
                and getstring(revs[1], _('not a symbol')) == 'ancestors'
                and bases[0] == 'not'
                and bases[1][0] == 'func'
                and getstring(bases[1][1], _('not a symbol')) == 'ancestors')

        w = min(wa, wb)
        if isonly(ta, tb):
            return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
        if isonly(tb, ta):
            return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))

        if wa > wb:
            # put the cheaper operand first
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        wa, ta = optimize(x[1], False)
        wb, tb = optimize(x[2], False)
        if wb < wa:
            wb, wa = wa, wb
        return max(wa, wb), (op, ta, tb)
    elif op == 'not':
        o = optimize(x[1], not small)
        return o[0], (op, o[1])
    elif op == 'parentpost':
        o = optimize(x[1], small)
        return o[0], (op, o[1])
    elif op == 'group':
        return optimize(x[1], small)
    elif op in 'dagrange range list parent ancestorspec':
        if op == 'parent':
            # x^:y means (x^) : y, not x ^ (:y)
            post = ('parentpost', x[1])
            if x[2][0] == 'dagrangepre':
                return optimize(('dagrange', post, x[2][1]), small)
            elif x[2][0] == 'rangepre':
                return optimize(('range', post, x[2][1]), small)

        wa, ta = optimize(x[1], small)
        wb, tb = optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'func':
        f = getstring(x[1], _("not a symbol"))
        wa, ta = optimize(x[2], small)
        if f in ("author branch closed date desc file grep keyword "
                 "outgoing user"):
            w = 10 # slow
        elif f in "modifies adds removes":
            w = 30 # slower
        elif f == "subrepo":
            w = 30 # slower (runs status against every candidate rev)
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in "reverse limit first _intlist":
            w = 0
        elif f in "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        return w + wa, (op, x[1], ta)
    return 1, x
2151 2195
2152 2196 _aliasarg = ('func', ('symbol', '_aliasarg'))
2153 2197 def _getaliasarg(tree):
2154 2198 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
2155 2199 return X, None otherwise.
2156 2200 """
2157 2201 if (len(tree) == 3 and tree[:2] == _aliasarg
2158 2202 and tree[2][0] == 'string'):
2159 2203 return tree[2][1]
2160 2204 return None
2161 2205
def _checkaliasarg(tree, known=None):
    """Check tree contains no _aliasarg construct or only ones which
    value is in known. Used to avoid alias placeholders injection.
    """
    if isinstance(tree, tuple):
        arg = _getaliasarg(tree)
        if arg is not None and (not known or arg not in known):
            # a placeholder smuggled in by the user rather than injected
            # by alias expansion itself
            raise error.UnknownIdentifier('_aliasarg', [])
        for t in tree:
            _checkaliasarg(t, known)
2172 2216
# the set of valid characters for the initial letter of symbols in
# alias declarations and definitions ('$' is accepted for backward
# compatibility; bytes > 127 allow non-ASCII names)
_aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
                           if c.isalnum() or c in '._@$' or ord(c) > 127)
2177 2221
def _tokenizealias(program, lookup=None):
    """Parse alias declaration/definition into a stream of tokens

    This allows symbol names to use also ``$`` as an initial letter
    (for backward compatibility), and callers of this function should
    examine whether ``$`` is used also for unexpected symbols or not.
    """
    # identical to the regular revset tokenizer except for the wider
    # initial-character set defined above
    return tokenize(program, lookup=lookup,
                    syminitletters=_aliassyminitletters)
2187 2231
def _parsealiasdecl(decl):
    """Parse alias declaration ``decl``

    This returns ``(name, tree, args, errorstr)`` tuple:

    - ``name``: of declared alias (may be ``decl`` itself at error)
    - ``tree``: parse result (or ``None`` at error)
    - ``args``: list of alias argument names (or None for symbol declaration)
    - ``errorstr``: detail about detected error (or None)

    >>> _parsealiasdecl('foo')
    ('foo', ('symbol', 'foo'), None, None)
    >>> _parsealiasdecl('$foo')
    ('$foo', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo::bar')
    ('foo::bar', None, None, 'invalid format')
    >>> _parsealiasdecl('foo bar')
    ('foo bar', None, None, 'at 4: invalid token')
    >>> _parsealiasdecl('foo()')
    ('foo', ('func', ('symbol', 'foo')), [], None)
    >>> _parsealiasdecl('$foo()')
    ('$foo()', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo($1, $2)')
    ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
    >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
    ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
    >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
    ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo(bar($1, $2))')
    ('foo(bar($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo("string")')
    ('foo("string")', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo($1, $2')
    ('foo($1, $2', None, None, 'at 10: unexpected token: end')
    >>> _parsealiasdecl('foo("string')
    ('foo("string', None, None, 'at 5: unterminated string')
    >>> _parsealiasdecl('foo($1, $2, $1)')
    ('foo', None, None, 'argument names collide with each other')
    """
    p = parser.parser(_tokenizealias, elements)
    try:
        tree, pos = p.parse(decl)
        if (pos != len(decl)):
            # trailing garbage after a valid prefix
            raise error.ParseError(_('invalid token'), pos)

        if isvalidsymbol(tree):
            # "name = ...." style
            name = getsymbol(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            return (name, ('symbol', name), None, None)

        if isvalidfunc(tree):
            # "name(arg, ....) = ...." style
            name = getfuncname(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            args = []
            for arg in getfuncargs(tree):
                # arguments must be plain symbols, no nesting or strings
                if not isvalidsymbol(arg):
                    return (decl, None, None, _("invalid argument list"))
                args.append(getsymbol(arg))
            if len(args) != len(set(args)):
                return (name, None, None,
                        _("argument names collide with each other"))
            return (name, ('func', ('symbol', name)), args, None)

        return (decl, None, None, _("invalid format"))
    except error.ParseError, inst:
        return (decl, None, None, parseerrordetail(inst))
2258 2302
def _parsealiasdefn(defn, args):
    """Parse alias definition ``defn``

    This function also replaces alias argument references in the
    specified definition by ``_aliasarg(ARGNAME)``.

    ``args`` is a list of alias argument names, or None if the alias
    is declared as a symbol.

    This returns "tree" as parsing result.

    >>> args = ['$1', '$2', 'foo']
    >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$1'))
      (func
        ('symbol', '_aliasarg')
        ('string', 'foo')))
    >>> try:
    ...     _parsealiasdefn('$1 or $bar', args)
    ... except error.ParseError, inst:
    ...     print parseerrordetail(inst)
    at 6: '$' not for alias arguments
    >>> args = ['$1', '$10', 'foo']
    >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$10'))
      ('symbol', 'foobar'))
    >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
    (or
      ('string', '$1')
      ('string', 'foo'))
    """
    def tokenizedefn(program, lookup=None):
        if args:
            argset = set(args)
        else:
            argset = set()

        for t, value, pos in _tokenizealias(program, lookup=lookup):
            if t == 'symbol':
                if value in argset:
                    # emulate tokenization of "_aliasarg('ARGNAME')":
                    # "_aliasarg()" is an unknown symbol only used separate
                    # alias argument placeholders from regular strings.
                    yield ('symbol', '_aliasarg', pos)
                    yield ('(', None, pos)
                    yield ('string', value, pos)
                    yield (')', None, pos)
                    continue
                elif value.startswith('$'):
                    # '$'-prefixed symbols other than declared arguments
                    # are reserved and rejected
                    raise error.ParseError(_("'$' not for alias arguments"),
                                           pos)
            yield (t, value, pos)

    # parse with the placeholder-injecting tokenizer; trailing garbage
    # after the expression is an error
    p = parser.parser(tokenizedefn, elements)
    tree, pos = p.parse(defn)
    if pos != len(defn):
        raise error.ParseError(_('invalid token'), pos)
    return tree
2323 2367
class revsetalias(object):
    # whether own `error` information is already shown or not.
    # this avoids showing same warning multiple times at each `findaliases`.
    warned = False

    def __init__(self, name, value):
        '''Aliases like:

        h = heads(default)
        b($1) = ancestors($1) - ancestors(default)
        '''
        # parse failures are recorded in self.error instead of raised, so
        # a broken alias can be reported lazily (and only once, see above)
        self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
        if self.error:
            self.error = _('failed to parse the declaration of revset alias'
                           ' "%s": %s') % (self.name, self.error)
            return

        try:
            self.replacement = _parsealiasdefn(value, self.args)
            # Check for placeholder injection
            _checkaliasarg(self.replacement, self.args)
        except error.ParseError, inst:
            self.error = _('failed to parse the definition of revset alias'
                           ' "%s": %s') % (self.name, parseerrordetail(inst))
2348 2392
2349 2393 def _getalias(aliases, tree):
2350 2394 """If tree looks like an unexpanded alias, return it. Return None
2351 2395 otherwise.
2352 2396 """
2353 2397 if isinstance(tree, tuple) and tree:
2354 2398 if tree[0] == 'symbol' and len(tree) == 2:
2355 2399 name = tree[1]
2356 2400 alias = aliases.get(name)
2357 2401 if alias and alias.args is None and alias.tree == tree:
2358 2402 return alias
2359 2403 if tree[0] == 'func' and len(tree) > 1:
2360 2404 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2361 2405 name = tree[1][1]
2362 2406 alias = aliases.get(name)
2363 2407 if alias and alias.args is not None and alias.tree == tree[:2]:
2364 2408 return alias
2365 2409 return None
2366 2410
def _expandargs(tree, args):
    """Recursively substitute _aliasarg placeholders using the args mapping."""
    if not tree or not isinstance(tree, tuple):
        return tree
    name = _getaliasarg(tree)
    if name is None:
        return tuple(_expandargs(subtree, args) for subtree in tree)
    return args[name]
2377 2421
def _expandaliases(aliases, tree, expanding, cache):
    """Expand aliases in tree, recursively.

    'aliases' is a dictionary mapping user defined aliases to
    revsetalias objects.

    'expanding' is the stack of aliases currently being expanded (used
    for cycle detection); 'cache' memoizes expanded replacement trees
    by alias name.
    """
    if not isinstance(tree, tuple):
        # Do not expand raw strings
        return tree
    alias = _getalias(aliases, tree)
    if alias is not None:
        if alias.error:
            raise util.Abort(alias.error)
        if alias in expanding:
            # seeing an alias twice on the expansion stack means a cycle
            raise error.ParseError(_('infinite expansion of revset alias "%s" '
                                     'detected') % alias.name)
        expanding.append(alias)
        if alias.name not in cache:
            cache[alias.name] = _expandaliases(aliases, alias.replacement,
                                               expanding, cache)
        result = cache[alias.name]
        expanding.pop()
        if alias.args is not None:
            # function-style alias: expand actual arguments first (with a
            # fresh expansion stack), then substitute the placeholders
            l = getlist(tree[2])
            if len(l) != len(alias.args):
                raise error.ParseError(
                    _('invalid number of arguments: %s') % len(l))
            l = [_expandaliases(aliases, a, [], cache) for a in l]
            result = _expandargs(result, dict(zip(alias.args, l)))
    else:
        # not an alias node itself: expand children
        result = tuple(_expandaliases(aliases, t, expanding, cache)
                       for t in tree)
    return result
2411 2455
def findaliases(ui, tree, showwarning=None):
    """Expand all [revsetalias] config aliases found in tree."""
    # reject user-supplied _aliasarg() placeholders before expansion
    _checkaliasarg(tree)
    aliases = {}
    for k, v in ui.configitems('revsetalias'):
        alias = revsetalias(k, v)
        aliases[alias.name] = alias
    tree = _expandaliases(aliases, tree, [], {})
    if showwarning:
        # warn about problematic (but not referred) aliases
        for name, alias in sorted(aliases.iteritems()):
            if alias.error and not alias.warned:
                showwarning(_('warning: %s\n') % (alias.error))
                alias.warned = True
    return tree
2426 2470
def foldconcat(tree):
    """Fold elements to be concatenated by `##`
    """
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return tree
    if tree[0] != '_concat':
        return tuple(foldconcat(t) for t in tree)
    # collapse a (possibly nested) '_concat' chain into one string node,
    # walking children left to right via an explicit stack
    pieces = []
    stack = [tree]
    while stack:
        current = stack.pop()
        if current[0] == '_concat':
            stack.extend(reversed(current[1:]))
        elif current[0] in ('string', 'symbol'):
            pieces.append(current[1])
        else:
            msg = _("\"##\" can't concatenate \"%s\" element") % (current[0])
            raise error.ParseError(msg)
    return ('string', ''.join(pieces))
2447 2491
def parse(spec, lookup=None):
    # parse spec with the revset grammar, returning a (tree, pos) pair
    p = parser.parser(tokenize, elements)
    return p.parse(spec, lookup=lookup)
2451 2495
def match(ui, spec, repo=None):
    """Compile a revset spec into a function of (repo, subset) returning
    the matching revisions."""
    if not spec:
        raise error.ParseError(_("empty query"))
    lookup = None
    if repo:
        lookup = repo.__contains__
    tree, pos = parse(spec, lookup)
    if (pos != len(spec)):
        raise error.ParseError(_("invalid token"), pos)
    if ui:
        # expand user-configured [revsetalias] entries
        tree = findaliases(ui, tree, showwarning=ui.warn)
    tree = foldconcat(tree)
    weight, tree = optimize(tree, True)
    def mfunc(repo, subset=None):
        if subset is None:
            subset = fullreposet(repo)
        if util.safehasattr(subset, 'isascending'):
            # subset already quacks like a smartset
            result = getset(repo, subset, tree)
        else:
            result = getset(repo, baseset(subset), tree)
        return result
    return mfunc
2474 2518
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    "_list('') or 1"
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    '''

    def quote(s):
        # repr() of a str yields a quoted, backslash-escaped literal
        return repr(str(s))

    def argtype(c, arg):
        # render a single argument according to its format character
        if c == 'd':
            return str(int(arg))
        elif c == 's':
            return quote(arg)
        elif c == 'r':
            parse(arg) # make sure syntax errors are confined
            return '(%s)' % arg
        elif c == 'n':
            return quote(node.hex(arg))
        elif c == 'b':
            return quote(arg.branch())

    def listexp(s, t):
        # render a list of type t, splitting long lists into a balanced
        # 'or' tree to keep internal lists manageable
        l = len(s)
        if l == 0:
            return "_list('')"
        elif l == 1:
            return argtype(t, s[0])
        elif t == 'd':
            return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
        elif t == 's':
            return "_list('%s')" % "\0".join(s)
        elif t == 'n':
            return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
        elif t == 'b':
            return "_list('%s')" % "\0".join(a.branch() for a in s)

        m = l // 2
        return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))

    ret = ''
    pos = 0
    arg = 0
    while pos < len(expr):
        c = expr[pos]
        if c == '%':
            pos += 1
            d = expr[pos]
            if d == '%':
                ret += d
            elif d in 'dsnbr':
                ret += argtype(d, args[arg])
                arg += 1
            elif d == 'l':
                # a list of some type
                pos += 1
                d = expr[pos]
                ret += listexp(list(args[arg]), d)
                arg += 1
            else:
                raise util.Abort('unexpected revspec format character %s' % d)
        else:
            ret += c
        pos += 1

    return ret
2568 2612
def prettyformat(tree):
    """Render a parse tree as an indented, multi-line string."""
    def _flatten(node, level, out):
        # leaves (and non-tuples) print verbatim; inner nodes open an
        # '(op' group whose last child line receives the closing paren
        if not isinstance(node, tuple) or node[0] in ('string', 'symbol'):
            out.append((level, str(node)))
            return
        out.append((level, '(%s' % node[0]))
        for child in node[1:]:
            _flatten(child, level + 1, out)
        lastlevel, lasttext = out[-1]
        out[-1] = (lastlevel, lasttext + ')')

    rows = []
    _flatten(tree, 0, rows)
    return '\n'.join('  ' * level + text for level, text in rows)
2583 2627
def depth(tree):
    """Return the nesting depth of a parse tree; non-tuples count as 0."""
    if not isinstance(tree, tuple):
        return 0
    return 1 + max(depth(child) for child in tree)
2589 2633
def funcsused(tree):
    """Return the set of function names referenced anywhere in tree."""
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return set()
    used = set()
    for subtree in tree[1:]:
        used.update(funcsused(subtree))
    if tree[0] == 'func':
        # a func node's name lives in its ('symbol', name) first child
        used.add(tree[1][1])
    return used
2600 2644
class abstractsmartset(object):
    """Base class defining the smartset API.

    A smartset is a collection of revision numbers that supports ordered
    iteration, fast membership testing and basic set algebra.  Subclasses
    implement the abstract methods below; they may additionally provide
    `fastasc`/`fastdesc` for fast directed iteration.
    """

    def __nonzero__(self):
        """True if the smartset is not empty"""
        raise NotImplementedError()

    def __contains__(self, rev):
        """provide fast membership testing"""
        raise NotImplementedError()

    def __iter__(self):
        """iterate the set in the order it is supposed to be iterated"""
        raise NotImplementedError()

    # Attributes containing a function to perform a fast iteration in a given
    # direction. A smartset can have none, one, or both defined.
    #
    # Default value is None instead of a function returning None to avoid
    # initializing an iterator just for testing if a fast method exists.
    fastasc = None
    fastdesc = None

    def isascending(self):
        """True if the set will iterate in ascending order"""
        raise NotImplementedError()

    def isdescending(self):
        """True if the set will iterate in descending order"""
        raise NotImplementedError()

    def min(self):
        """return the minimum element in the set"""
        if self.fastasc is not None:
            # the first value of an ascending iteration is the smallest
            try:
                return next(self.fastasc())
            except StopIteration:
                raise ValueError('arg is an empty sequence')
        return min(self)

    def max(self):
        """return the maximum element in the set"""
        if self.fastdesc is not None:
            # the first value of a descending iteration is the largest
            try:
                return next(self.fastdesc())
            except StopIteration:
                raise ValueError('arg is an empty sequence')
        return max(self)

    def first(self):
        """return the first element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def last(self):
        """return the last element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def __len__(self):
        """return the length of the smartsets

        This can be expensive on smartset that could be lazy otherwise."""
        raise NotImplementedError()

    def reverse(self):
        """reverse the expected iteration order"""
        raise NotImplementedError()

    def sort(self, reverse=True):
        """get the set to iterate in an ascending or descending order"""
        raise NotImplementedError()

    def __and__(self, other):
        """Returns a new object with the intersection of the two collections.

        This is part of the mandatory API for smartset."""
        return self.filter(other.__contains__, cache=False)

    def __add__(self, other):
        """Returns a new object with the union of the two collections.

        This is part of the mandatory API for smartset."""
        return addset(self, other)

    def __sub__(self, other):
        """Returns a new object with the substraction of the two collections.

        This is part of the mandatory API for smartset."""
        inother = other.__contains__
        return self.filter(lambda r: not inother(r), cache=False)

    def filter(self, condition, cache=True):
        """Returns this smartset filtered by condition as a new smartset.

        `condition` is a callable which takes a revision number and returns a
        boolean.

        This is part of the mandatory API for smartset."""
        # builtin callables cannot be wrapped by cachefunc, but they also do
        # not need to be (no func_code attribute)
        if cache and util.safehasattr(condition, 'func_code'):
            condition = util.cachefunc(condition)
        return filteredset(self, condition)
2703 2747
class baseset(abstractsmartset):
    """Smartset backed by a plain in-memory list of revisions.

    The data is kept in its original order; a sorted copy and a set view are
    built lazily the first time an ordered iteration or a membership test is
    requested.
    """
    def __init__(self, data=()):
        if not isinstance(data, list):
            data = list(data)
        self._list = data
        # None: unordered, True: ascending, False: descending
        self._ascending = None

    @util.propertycache
    def _set(self):
        # lazily built set view for O(1) membership tests
        return set(self._list)

    @util.propertycache
    def _asclist(self):
        # lazily built ascending copy for ordered iteration
        return sorted(self._list)

    def __iter__(self):
        asc = self._ascending
        if asc is None:
            return iter(self._list)
        if asc:
            return iter(self._asclist)
        return reversed(self._asclist)

    def fastasc(self):
        return iter(self._asclist)

    def fastdesc(self):
        return reversed(self._asclist)

    @util.propertycache
    def __contains__(self):
        return self._set.__contains__

    def __nonzero__(self):
        return bool(self._list)

    def sort(self, reverse=False):
        self._ascending = not bool(reverse)

    def reverse(self):
        if self._ascending is None:
            # no declared order yet: physically reverse the data
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def __len__(self):
        return len(self._list)

    def isascending(self):
        """Returns True if the collection is ascending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self._list) <= 1:
            return True
        return self._ascending is True

    def isdescending(self):
        """Returns True if the collection is descending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self._list) <= 1:
            return True
        return self._ascending is False

    def first(self):
        if not self._list:
            return None
        asc = self._ascending
        if asc is None:
            return self._list[0]
        if asc:
            return self._asclist[0]
        return self._asclist[-1]

    def last(self):
        if not self._list:
            return None
        asc = self._ascending
        if asc is None:
            return self._list[-1]
        if asc:
            return self._asclist[-1]
        return self._asclist[0]
2794 2838
class filteredset(abstractsmartset):
    """Duck type for baseset class which iterates lazily over the revisions in
    the subset and contains a function which tests for membership in the
    revset
    """
    def __init__(self, subset, condition=lambda x: True):
        """
        condition: a function that decide whether a revision in the subset
        belongs to the revset or not.
        """
        self._subset = subset
        self._condition = condition
        # memoizes condition results per revision
        self._cache = {}

    def __contains__(self, x):
        c = self._cache
        if x not in c:
            # a revision belongs here iff it is in the subset and passes
            # the condition; cache the combined answer
            v = c[x] = x in self._subset and self._condition(x)
            return v
        return c[x]

    def __iter__(self):
        return self._iterfilter(self._subset)

    def _iterfilter(self, it):
        """yield the elements of `it` that satisfy the condition"""
        cond = self._condition
        for x in it:
            if cond(x):
                yield x

    @property
    def fastasc(self):
        it = self._subset.fastasc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    @property
    def fastdesc(self):
        it = self._subset.fastdesc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    def __nonzero__(self):
        for r in self:
            return True
        return False

    def __len__(self):
        # Basic implementation to be changed in future patches.
        l = baseset([r for r in self])
        return len(l)

    def sort(self, reverse=False):
        self._subset.sort(reverse=reverse)

    def reverse(self):
        self._subset.reverse()

    def isascending(self):
        return self._subset.isascending()

    def isdescending(self):
        return self._subset.isdescending()

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        it = None
        # BUG FIX: isascending/isdescending are methods and were previously
        # tested without calling them, so the (always truthy) bound method
        # made every set take the first branch; the descending branch also
        # wrongly used fastdesc instead of fastasc.
        if self._subset.isascending():
            # ascending iteration: the last element is the largest, which a
            # fast descending iterator yields first
            it = self.fastdesc
        elif self._subset.isdescending():
            # descending iteration: the last element is the smallest, which a
            # fast ascending iterator yields first
            it = self.fastasc
        if it is None:
            # order unknown or no fast iterator available:
            # slowly consume everything. This needs improvement
            it = lambda: reversed(list(self))
        for x in it():
            return x
        return None
2878 2922
class addset(abstractsmartset):
    """Represent the addition of two sets

    Wrapper structure for lazily adding two structures without losing much
    performance on the __contains__ method

    If the ascending attribute is set, that means the two structures are
    ordered in either an ascending or descending way. Therefore, we can add
    them maintaining the order by iterating over both at the same time
    """
    def __init__(self, revs1, revs2, ascending=None):
        self._r1 = revs1
        self._r2 = revs2
        self._iter = None
        # None: unknown order, True: ascending, False: descending
        self._ascending = ascending
        self._genlist = None
        self._asclist = None

    def __len__(self):
        return len(self._list)

    def __nonzero__(self):
        return bool(self._r1) or bool(self._r2)

    @util.propertycache
    def _list(self):
        # materialize the union once, as a baseset, and keep it
        if not self._genlist:
            self._genlist = baseset(self._iterator())
        return self._genlist

    def _iterator(self):
        """Iterate over both collections without repeating elements

        If the ascending attribute is not set, iterate over the first one and
        then over the second one checking for membership on the first one so we
        dont yield any duplicates.

        If the ascending attribute is set, iterate over both collections at the
        same time, yielding only one value at a time in the given order.
        """
        if self._ascending is None:
            def gen():
                for r in self._r1:
                    yield r
                inr1 = self._r1.__contains__
                for r in self._r2:
                    if not inr1(r):
                        yield r
            gen = gen()
        else:
            iter1 = iter(self._r1)
            iter2 = iter(self._r2)
            gen = self._iterordered(self._ascending, iter1, iter2)
        return gen

    def __iter__(self):
        if self._ascending is None:
            if self._genlist:
                return iter(self._genlist)
            return iter(self._iterator())
        self._trysetasclist()
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # consume the gen and try again
            self._list
            return iter(self)
        return it()

    def _trysetasclist(self):
        """populate the _asclist attribute if possible and necessary"""
        if self._genlist is not None and self._asclist is None:
            self._asclist = sorted(self._genlist)

    @property
    def fastasc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__iter__
        iter1 = self._r1.fastasc
        iter2 = self._r2.fastasc
        if None in (iter1, iter2):
            # one side has no fast ascending iterator
            return None
        return lambda: self._iterordered(True, iter1(), iter2())

    @property
    def fastdesc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__reversed__
        iter1 = self._r1.fastdesc
        iter2 = self._r2.fastdesc
        if None in (iter1, iter2):
            # one side has no fast descending iterator
            return None
        return lambda: self._iterordered(False, iter1(), iter2())

    def _iterordered(self, ascending, iter1, iter2):
        """produce an ordered iteration from two iterators with the same order

        The ascending is used to indicated the iteration direction.
        """
        # pick the next value to yield: min when ascending, max when
        # descending (this was previously computed twice; the duplicate
        # dead assignment has been removed)
        choice = max
        if ascending:
            choice = min

        val1 = None
        val2 = None
        try:
            # Consume both iterators in an ordered way until one is
            # empty
            while True:
                if val1 is None:
                    val1 = iter1.next()
                if val2 is None:
                    val2 = iter2.next()
                next = choice(val1, val2)
                yield next
                if val1 == next:
                    val1 = None
                if val2 == next:
                    val2 = None
        except StopIteration:
            # Flush any remaining values and consume the other one
            it = iter2
            if val1 is not None:
                yield val1
                it = iter1
            elif val2 is not None:
                # might have been equality and both are empty
                yield val2
            for val in it:
                yield val

    def __contains__(self, x):
        return x in self._r1 or x in self._r2

    def sort(self, reverse=False):
        """Sort the added set

        For this we use the cached list with all the generated values and if we
        know they are ascending or descending we can sort them in a smart way.
        """
        self._ascending = not reverse

    def isascending(self):
        return self._ascending is not None and self._ascending

    def isdescending(self):
        return self._ascending is not None and not self._ascending

    def reverse(self):
        if self._ascending is None:
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        # reuse first() on the temporarily reversed iteration order
        self.reverse()
        val = self.first()
        self.reverse()
        return val
3051 3095
class generatorset(abstractsmartset):
    """Wrap a generator for lazy iteration

    Wrapper structure for generators that provides lazy membership and can
    be iterated more than once.
    When asked for membership it generates values until either it finds the
    requested one or has gone through all the elements in the generator
    """
    def __init__(self, gen, iterasc=None):
        """
        gen: a generator producing the values for the generatorset.

        iterasc: None if the generator order is unknown, True if it yields
        ascending values, False if descending.
        """
        self._gen = gen
        self._asclist = None
        self._cache = {}
        self._genlist = []
        self._finished = False
        self._ascending = True
        if iterasc is not None:
            if iterasc:
                self.fastasc = self._iterator
                self.__contains__ = self._asccontains
            else:
                self.fastdesc = self._iterator
                self.__contains__ = self._desccontains

    def __nonzero__(self):
        for r in self:
            return True
        return False

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def _asccontains(self, x):
        """version of contains optimised for ascending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l > x:
                # ascending generator: x can no longer appear
                break

        self._cache[x] = False
        return False

    def _desccontains(self, x):
        """version of contains optimised for descending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l < x:
                # descending generator: x can no longer appear
                break

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator
        for x in self._consumegen():
            pass
        # recall the same code
        return iter(self)

    def _iterator(self):
        if self._finished:
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revision
        # removed from _consumegen and added to genlist in another instance.
        #
        # Getting rid of it would provide an about 15% speed up on this
        # iteration.
        genlist = self._genlist
        nextrev = self._consumegen().next
        _len = len # cache global lookup
        def gen():
            i = 0
            while True:
                if i < _len(genlist):
                    yield genlist[i]
                else:
                    yield nextrev()
                i += 1
        return gen()

    def _consumegen(self):
        """consume the wrapped generator, caching and yielding its values

        Once exhausted, installs fastasc/fastdesc backed by a sorted copy."""
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        if not self._finished:
            self._finished = True
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__

    def __len__(self):
        for x in self._consumegen():
            pass
        return len(self._genlist)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        if self:
            return it().next()
        return None

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            # BUG FIX: this previously recursed into self.first(), returning
            # the first element instead of the last one on the slow path
            return self.last()
        if self:
            return it().next()
        return None
3220 3264
class spanset(abstractsmartset):
    """Duck type for baseset class which represents a range of revisions and
    can work lazily and without having all the range in memory

    Note that spanset(x, y) behave almost like xrange(x, y) except for two
    notable points:
    - when x < y it will be automatically descending,
    - revision filtered with this repoview will be skipped.

    """
    def __init__(self, repo, start=0, end=None):
        """
        start: first revision included the set
        (default to 0)
        end: first revision excluded (last+1)
        (default to len(repo)

        Spanset will be descending if `end` < `start`.
        """
        if end is None:
            end = len(repo)
        self._ascending = start <= end
        if not self._ascending:
            start, end = end + 1, start + 1
        # invariant: self._start <= self._end after this normalization
        self._start = start
        self._end = end
        self._hiddenrevs = repo.changelog.filteredrevs

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def _iterfilter(self, iterrange):
        """yield revisions from iterrange that are not hidden"""
        s = self._hiddenrevs
        for r in iterrange:
            if r not in s:
                yield r

    def __iter__(self):
        if self._ascending:
            return self.fastasc()
        else:
            return self.fastdesc()

    def fastasc(self):
        iterrange = xrange(self._start, self._end)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def fastdesc(self):
        iterrange = xrange(self._end - 1, self._start - 1, -1)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def __contains__(self, rev):
        hidden = self._hiddenrevs
        return ((self._start <= rev < self._end)
                and not (hidden and rev in hidden))

    def __nonzero__(self):
        for r in self:
            return True
        return False

    def __len__(self):
        if not self._hiddenrevs:
            return abs(self._end - self._start)
        else:
            count = 0
            start = self._start
            end = self._end
            for rev in self._hiddenrevs:
                # _start <= _end always holds (see __init__), so the former
                # extra test `end < rev <= start` was dead code; a single
                # range check is enough
                if start <= rev < end:
                    count += 1
            return abs(self._end - self._start) - count

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        for x in it():
            return x
        return None

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        for x in it():
            return x
        return None
3324 3368
class fullreposet(spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization and magic to handle virtual
    revisions such as "null".
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __contains__(self, rev):
        # the span covers the whole repo, so only hidden revisions can be
        # excluded (the given rev is assumed valid)
        hidden = self._hiddenrevs
        if not hidden:
            return True
        return rev not in hidden

    def __and__(self, other):
        """As self contains the whole repo, all of the other set should also be
        in self. Therefore `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        if not util.safehasattr(other, 'isascending'):
            # `other` is not a smartset: it was used with "&", so assume it
            # is a plain set-like object and filter out hidden revisions
            # (this boldly assumes all smartsets are pure)
            other = baseset(other - self._hiddenrevs)

        other.sort(reverse=self.isdescending())
        return other
3357 3401
# tell hggettext to extract docstrings from these functions:
# (module-level list of predicate implementations whose docstrings end up in
# the translation catalogs)
i18nfunctions = symbols.values()
@@ -1,461 +1,542 b''
1 1 Preparing the subrepository 'sub2'
2 2
3 3 $ hg init sub2
4 4 $ echo sub2 > sub2/sub2
5 5 $ hg add -R sub2
6 6 adding sub2/sub2 (glob)
7 7 $ hg commit -R sub2 -m "sub2 import"
8 8
9 9 Preparing the 'sub1' repo which depends on the subrepo 'sub2'
10 10
11 11 $ hg init sub1
12 12 $ echo sub1 > sub1/sub1
13 13 $ echo "sub2 = ../sub2" > sub1/.hgsub
14 14 $ hg clone sub2 sub1/sub2
15 15 updating to branch default
16 16 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
17 17 $ hg add -R sub1
18 18 adding sub1/.hgsub (glob)
19 19 adding sub1/sub1 (glob)
20 20 $ hg commit -R sub1 -m "sub1 import"
21 21
22 22 Preparing the 'main' repo which depends on the subrepo 'sub1'
23 23
24 24 $ hg init main
25 25 $ echo main > main/main
26 26 $ echo "sub1 = ../sub1" > main/.hgsub
27 27 $ hg clone sub1 main/sub1
28 28 updating to branch default
29 29 cloning subrepo sub2 from $TESTTMP/sub2
30 30 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
31 31 $ hg add -R main
32 32 adding main/.hgsub (glob)
33 33 adding main/main (glob)
34 34 $ hg commit -R main -m "main import"
35 35
36 36 Cleaning both repositories, just as a clone -U
37 37
38 38 $ hg up -C -R sub2 null
39 39 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
40 40 $ hg up -C -R sub1 null
41 41 0 files updated, 0 files merged, 3 files removed, 0 files unresolved
42 42 $ hg up -C -R main null
43 43 0 files updated, 0 files merged, 3 files removed, 0 files unresolved
44 44 $ rm -rf main/sub1
45 45 $ rm -rf sub1/sub2
46 46
47 47 Clone main
48 48
49 49 $ hg --config extensions.largefiles= clone main cloned
50 50 updating to branch default
51 51 cloning subrepo sub1 from $TESTTMP/sub1
52 52 cloning subrepo sub1/sub2 from $TESTTMP/sub2 (glob)
53 53 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
54 54
55 55 Largefiles is NOT enabled in the clone if the source repo doesn't require it
56 56 $ cat cloned/.hg/hgrc
57 57 # example repository config (see "hg help config" for more info)
58 58 [paths]
59 59 default = $TESTTMP/main (glob)
60 60
61 61 # path aliases to other clones of this repo in URLs or filesystem paths
62 62 # (see "hg help config.paths" for more info)
63 63 #
64 64 # default-push = ssh://jdoe@example.net/hg/jdoes-fork
65 65 # my-fork = ssh://jdoe@example.net/hg/jdoes-fork
66 66 # my-clone = /home/jdoe/jdoes-clone
67 67
68 68 [ui]
69 69 # name and email (local to this repository, optional), e.g.
70 70 # username = Jane Doe <jdoe@example.com>
71 71
72 72 Checking cloned repo ids
73 73
74 74 $ printf "cloned " ; hg id -R cloned
75 75 cloned 7f491f53a367 tip
76 76 $ printf "cloned/sub1 " ; hg id -R cloned/sub1
77 77 cloned/sub1 fc3b4ce2696f tip
78 78 $ printf "cloned/sub1/sub2 " ; hg id -R cloned/sub1/sub2
79 79 cloned/sub1/sub2 c57a0840e3ba tip
80 80
81 81 debugsub output for main and sub1
82 82
83 83 $ hg debugsub -R cloned
84 84 path sub1
85 85 source ../sub1
86 86 revision fc3b4ce2696f7741438c79207583768f2ce6b0dd
87 87 $ hg debugsub -R cloned/sub1
88 88 path sub2
89 89 source ../sub2
90 90 revision c57a0840e3badd667ef3c3ef65471609acb2ba3c
91 91
92 92 Modifying deeply nested 'sub2'
93 93
94 94 $ echo modified > cloned/sub1/sub2/sub2
95 95 $ hg commit --subrepos -m "deep nested modif should trigger a commit" -R cloned
96 96 committing subrepository sub1
97 97 committing subrepository sub1/sub2 (glob)
98 98
99 99 Checking modified node ids
100 100
101 101 $ printf "cloned " ; hg id -R cloned
102 102 cloned ffe6649062fe tip
103 103 $ printf "cloned/sub1 " ; hg id -R cloned/sub1
104 104 cloned/sub1 2ecb03bf44a9 tip
105 105 $ printf "cloned/sub1/sub2 " ; hg id -R cloned/sub1/sub2
106 106 cloned/sub1/sub2 53dd3430bcaf tip
107 107
108 108 debugsub output for main and sub1
109 109
110 110 $ hg debugsub -R cloned
111 111 path sub1
112 112 source ../sub1
113 113 revision 2ecb03bf44a94e749e8669481dd9069526ce7cb9
114 114 $ hg debugsub -R cloned/sub1
115 115 path sub2
116 116 source ../sub2
117 117 revision 53dd3430bcaf5ab4a7c48262bcad6d441f510487
118 118
119 119 Check that deep archiving works
120 120
121 121 $ cd cloned
122 122 $ echo 'test' > sub1/sub2/test.txt
123 123 $ hg --config extensions.largefiles=! add sub1/sub2/test.txt
124 124 $ mkdir sub1/sub2/folder
125 125 $ echo 'subfolder' > sub1/sub2/folder/test.txt
126 126 $ hg ci -ASm "add test.txt"
127 127 adding sub1/sub2/folder/test.txt
128 128 committing subrepository sub1
129 129 committing subrepository sub1/sub2 (glob)
130 130
131 131 .. but first take a detour through some deep removal testing
132 132
133 133 $ hg remove -S -I 're:.*.txt' .
134 134 removing sub1/sub2/folder/test.txt (glob)
135 135 removing sub1/sub2/test.txt (glob)
136 136 $ hg status -S
137 137 R sub1/sub2/folder/test.txt
138 138 R sub1/sub2/test.txt
139 139 $ hg update -Cq
140 140 $ hg remove -I 're:.*.txt' sub1
141 141 $ hg status -S
142 142 $ hg remove sub1/sub2/folder/test.txt
143 143 $ hg remove sub1/.hgsubstate
144 144 $ hg status -S
145 145 R sub1/.hgsubstate
146 146 R sub1/sub2/folder/test.txt
147 147 $ hg update -Cq
148 148 $ touch sub1/foo
149 149 $ hg forget sub1/sub2/folder/test.txt
150 150 $ rm sub1/sub2/test.txt
151 151
152 152 Test relative path printing + subrepos
153 153 $ mkdir -p foo/bar
154 154 $ cd foo
155 155 $ touch bar/abc
156 156 $ hg addremove -S ..
157 157 adding ../sub1/sub2/folder/test.txt (glob)
158 158 removing ../sub1/sub2/test.txt (glob)
159 159 adding ../sub1/foo (glob)
160 160 adding bar/abc (glob)
161 161 $ cd ..
162 162 $ hg status -S
163 163 A foo/bar/abc
164 164 A sub1/foo
165 165 R sub1/sub2/test.txt
166 166 $ hg update -Cq
167 167 $ touch sub1/sub2/folder/bar
168 168 $ hg addremove sub1/sub2
169 169 adding sub1/sub2/folder/bar (glob)
170 170 $ hg status -S
171 171 A sub1/sub2/folder/bar
172 172 ? foo/bar/abc
173 173 ? sub1/foo
174 174 $ hg update -Cq
175 175 $ hg addremove sub1
176 176 adding sub1/sub2/folder/bar (glob)
177 177 adding sub1/foo (glob)
178 178 $ hg update -Cq
179 179 $ rm sub1/sub2/folder/test.txt
180 180 $ rm sub1/sub2/test.txt
181 181 $ hg ci -ASm "remove test.txt"
182 182 adding sub1/sub2/folder/bar
183 183 removing sub1/sub2/folder/test.txt
184 184 removing sub1/sub2/test.txt
185 185 adding sub1/foo
186 186 adding foo/bar/abc
187 187 committing subrepository sub1
188 188 committing subrepository sub1/sub2 (glob)
189 189
190 190 $ hg forget sub1/sub2/sub2
191 191 $ echo x > sub1/sub2/x.txt
192 192 $ hg add sub1/sub2/x.txt
193 193
194 194 Files sees uncommitted adds and removes in subrepos
195 195 $ hg files -S
196 196 .hgsub
197 197 .hgsubstate
198 198 foo/bar/abc (glob)
199 199 main
200 200 sub1/.hgsub (glob)
201 201 sub1/.hgsubstate (glob)
202 202 sub1/foo (glob)
203 203 sub1/sub1 (glob)
204 204 sub1/sub2/folder/bar (glob)
205 205 sub1/sub2/x.txt (glob)
206 206
207 207 $ hg rollback -q
208 208 $ hg up -Cq
209 209
210 210 $ hg --config extensions.largefiles=! archive -S ../archive_all
211 211 $ find ../archive_all | sort
212 212 ../archive_all
213 213 ../archive_all/.hg_archival.txt
214 214 ../archive_all/.hgsub
215 215 ../archive_all/.hgsubstate
216 216 ../archive_all/main
217 217 ../archive_all/sub1
218 218 ../archive_all/sub1/.hgsub
219 219 ../archive_all/sub1/.hgsubstate
220 220 ../archive_all/sub1/sub1
221 221 ../archive_all/sub1/sub2
222 222 ../archive_all/sub1/sub2/folder
223 223 ../archive_all/sub1/sub2/folder/test.txt
224 224 ../archive_all/sub1/sub2/sub2
225 225 ../archive_all/sub1/sub2/test.txt
226 226
227 227 Check that archive -X works in deep subrepos
228 228
229 229 $ hg --config extensions.largefiles=! archive -S -X '**test*' ../archive_exclude
230 230 $ find ../archive_exclude | sort
231 231 ../archive_exclude
232 232 ../archive_exclude/.hg_archival.txt
233 233 ../archive_exclude/.hgsub
234 234 ../archive_exclude/.hgsubstate
235 235 ../archive_exclude/main
236 236 ../archive_exclude/sub1
237 237 ../archive_exclude/sub1/.hgsub
238 238 ../archive_exclude/sub1/.hgsubstate
239 239 ../archive_exclude/sub1/sub1
240 240 ../archive_exclude/sub1/sub2
241 241 ../archive_exclude/sub1/sub2/sub2
242 242
243 243 $ hg --config extensions.largefiles=! archive -S -I '**test*' ../archive_include
244 244 $ find ../archive_include | sort
245 245 ../archive_include
246 246 ../archive_include/sub1
247 247 ../archive_include/sub1/sub2
248 248 ../archive_include/sub1/sub2/folder
249 249 ../archive_include/sub1/sub2/folder/test.txt
250 250 ../archive_include/sub1/sub2/test.txt
251 251
252 252 Check that deep archive works with largefiles (which overrides hgsubrepo impl)
253 253 This also tests the repo.ui regression in 43fb170a23bd, and that lf subrepo
254 254 subrepos are archived properly.
255 255 Note that add --large through a subrepo currently adds the file as a normal file
256 256
257 257 $ echo "large" > sub1/sub2/large.bin
258 258 $ hg --config extensions.largefiles= add --large -R sub1/sub2 sub1/sub2/large.bin
259 259 $ echo "large" > large.bin
260 260 $ hg --config extensions.largefiles= add --large large.bin
261 261 $ hg --config extensions.largefiles= ci -S -m "add large files"
262 262 committing subrepository sub1
263 263 committing subrepository sub1/sub2 (glob)
264 264
265 265 $ hg --config extensions.largefiles= archive -S ../archive_lf
266 266 $ find ../archive_lf | sort
267 267 ../archive_lf
268 268 ../archive_lf/.hg_archival.txt
269 269 ../archive_lf/.hgsub
270 270 ../archive_lf/.hgsubstate
271 271 ../archive_lf/large.bin
272 272 ../archive_lf/main
273 273 ../archive_lf/sub1
274 274 ../archive_lf/sub1/.hgsub
275 275 ../archive_lf/sub1/.hgsubstate
276 276 ../archive_lf/sub1/sub1
277 277 ../archive_lf/sub1/sub2
278 278 ../archive_lf/sub1/sub2/folder
279 279 ../archive_lf/sub1/sub2/folder/test.txt
280 280 ../archive_lf/sub1/sub2/large.bin
281 281 ../archive_lf/sub1/sub2/sub2
282 282 ../archive_lf/sub1/sub2/test.txt
283 283 $ rm -rf ../archive_lf
284 284
285 285 Exclude large files from main and sub-sub repo
286 286
287 287 $ hg --config extensions.largefiles= archive -S -X '**.bin' ../archive_lf
288 288 $ find ../archive_lf | sort
289 289 ../archive_lf
290 290 ../archive_lf/.hg_archival.txt
291 291 ../archive_lf/.hgsub
292 292 ../archive_lf/.hgsubstate
293 293 ../archive_lf/main
294 294 ../archive_lf/sub1
295 295 ../archive_lf/sub1/.hgsub
296 296 ../archive_lf/sub1/.hgsubstate
297 297 ../archive_lf/sub1/sub1
298 298 ../archive_lf/sub1/sub2
299 299 ../archive_lf/sub1/sub2/folder
300 300 ../archive_lf/sub1/sub2/folder/test.txt
301 301 ../archive_lf/sub1/sub2/sub2
302 302 ../archive_lf/sub1/sub2/test.txt
303 303 $ rm -rf ../archive_lf
304 304
305 305 Exclude normal files from main and sub-sub repo
306 306
307 307 $ hg --config extensions.largefiles= archive -S -X '**.txt' ../archive_lf
308 308 $ find ../archive_lf | sort
309 309 ../archive_lf
310 310 ../archive_lf/.hgsub
311 311 ../archive_lf/.hgsubstate
312 312 ../archive_lf/large.bin
313 313 ../archive_lf/main
314 314 ../archive_lf/sub1
315 315 ../archive_lf/sub1/.hgsub
316 316 ../archive_lf/sub1/.hgsubstate
317 317 ../archive_lf/sub1/sub1
318 318 ../archive_lf/sub1/sub2
319 319 ../archive_lf/sub1/sub2/large.bin
320 320 ../archive_lf/sub1/sub2/sub2
321 321 $ rm -rf ../archive_lf
322 322
323 323 Include normal files from within a largefiles subrepo
324 324
325 325 $ hg --config extensions.largefiles= archive -S -I '**.txt' ../archive_lf
326 326 $ find ../archive_lf | sort
327 327 ../archive_lf
328 328 ../archive_lf/.hg_archival.txt
329 329 ../archive_lf/sub1
330 330 ../archive_lf/sub1/sub2
331 331 ../archive_lf/sub1/sub2/folder
332 332 ../archive_lf/sub1/sub2/folder/test.txt
333 333 ../archive_lf/sub1/sub2/test.txt
334 334 $ rm -rf ../archive_lf
335 335
336 336 Include large files from within a largefiles subrepo
337 337
338 338 $ hg --config extensions.largefiles= archive -S -I '**.bin' ../archive_lf
339 339 $ find ../archive_lf | sort
340 340 ../archive_lf
341 341 ../archive_lf/large.bin
342 342 ../archive_lf/sub1
343 343 ../archive_lf/sub1/sub2
344 344 ../archive_lf/sub1/sub2/large.bin
345 345 $ rm -rf ../archive_lf
346 346
347 347 Find an exact largefile match in a largefiles subrepo
348 348
349 349 $ hg --config extensions.largefiles= archive -S -I 'sub1/sub2/large.bin' ../archive_lf
350 350 $ find ../archive_lf | sort
351 351 ../archive_lf
352 352 ../archive_lf/sub1
353 353 ../archive_lf/sub1/sub2
354 354 ../archive_lf/sub1/sub2/large.bin
355 355 $ rm -rf ../archive_lf
356 356
357 357 The local repo enables largefiles if a largefiles repo is cloned
358 358 $ hg showconfig extensions
359 359 abort: repository requires features unknown to this Mercurial: largefiles!
360 360 (see http://mercurial.selenic.com/wiki/MissingRequirement for more information)
361 361 [255]
362 362 $ hg --config extensions.largefiles= clone -qU . ../lfclone
363 363 $ cat ../lfclone/.hg/hgrc
364 364 # example repository config (see "hg help config" for more info)
365 365 [paths]
366 366 default = $TESTTMP/cloned (glob)
367 367
368 368 # path aliases to other clones of this repo in URLs or filesystem paths
369 369 # (see "hg help config.paths" for more info)
370 370 #
371 371 # default-push = ssh://jdoe@example.net/hg/jdoes-fork
372 372 # my-fork = ssh://jdoe@example.net/hg/jdoes-fork
373 373 # my-clone = /home/jdoe/jdoes-clone
374 374
375 375 [ui]
376 376 # name and email (local to this repository, optional), e.g.
377 377 # username = Jane Doe <jdoe@example.com>
378 378
379 379 [extensions]
380 380 largefiles=
381 381
382 382 Find an exact match to a standin (should archive nothing)
383 383 $ hg --config extensions.largefiles= archive -S -I 'sub/sub2/.hglf/large.bin' ../archive_lf
384 384 $ find ../archive_lf 2> /dev/null | sort
385 385
386 386 $ cat >> $HGRCPATH <<EOF
387 387 > [extensions]
388 388 > largefiles=
389 389 > [largefiles]
390 390 > patterns=glob:**.dat
391 391 > EOF
392 392
393 393 Test forget through a deep subrepo with the largefiles extension, both a
394 394 largefile and a normal file. Then a largefile that hasn't been committed yet.
395 395 $ touch sub1/sub2/untracked.txt
396 396 $ touch sub1/sub2/large.dat
397 397 $ hg forget sub1/sub2/large.bin sub1/sub2/test.txt sub1/sub2/untracked.txt
398 398 not removing sub1/sub2/untracked.txt: file is already untracked (glob)
399 399 [1]
400 400 $ hg add --large --dry-run -v sub1/sub2/untracked.txt
401 401 adding sub1/sub2/untracked.txt as a largefile (glob)
402 402 $ hg add --large -v sub1/sub2/untracked.txt
403 403 adding sub1/sub2/untracked.txt as a largefile (glob)
404 404 $ hg add --normal -v sub1/sub2/large.dat
405 405 adding sub1/sub2/large.dat (glob)
406 406 $ hg forget -v sub1/sub2/untracked.txt
407 407 removing sub1/sub2/untracked.txt (glob)
408 408 $ hg status -S
409 409 A sub1/sub2/large.dat
410 410 R sub1/sub2/large.bin
411 411 R sub1/sub2/test.txt
412 412 ? foo/bar/abc
413 413 ? sub1/sub2/untracked.txt
414 414 ? sub1/sub2/x.txt
415 415 $ hg add sub1/sub2
416 416 $ hg ci -Sqm 'forget testing'
417 417
418 418 Test issue4330: commit a directory where only normal files have changed
419 419 $ touch foo/bar/large.dat
420 420 $ hg add --large foo/bar/large.dat
421 421 $ hg ci -m 'add foo/bar/large.dat'
422 422 $ touch a.txt
423 423 $ touch a.dat
424 424 $ hg add -v foo/bar/abc a.txt a.dat
425 425 adding a.dat as a largefile
426 426 adding a.txt
427 427 adding foo/bar/abc (glob)
428 428 $ hg ci -m 'dir commit with only normal file deltas' foo/bar
429 429 $ hg status
430 430 A a.dat
431 431 A a.txt
432 432
433 433 Test a directory commit with a changed largefile and a changed normal file
434 434 $ echo changed > foo/bar/large.dat
435 435 $ echo changed > foo/bar/abc
436 436 $ hg ci -m 'dir commit with normal and lf file deltas' foo
437 437 $ hg status
438 438 A a.dat
439 439 A a.txt
440 440
441 441 $ hg ci -m "add a.*"
442 442 $ hg mv a.dat b.dat
443 443 $ hg mv foo/bar/abc foo/bar/def
444 444 $ hg status -C
445 445 A b.dat
446 446 a.dat
447 447 A foo/bar/def
448 448 foo/bar/abc
449 449 R a.dat
450 450 R foo/bar/abc
451 451
452 452 $ hg ci -m "move large and normal"
453 453 $ hg status -C --rev '.^' --rev .
454 454 A b.dat
455 455 a.dat
456 456 A foo/bar/def
457 457 foo/bar/abc
458 458 R a.dat
459 459 R foo/bar/abc
460 460
461
462 $ echo foo > main
463 $ hg ci -m "mod parent only"
464 $ hg init sub3
465 $ echo "sub3 = sub3" >> .hgsub
466 $ echo xyz > sub3/a.txt
467 $ hg add sub3/a.txt
468 $ hg ci -Sm "add sub3"
469 committing subrepository sub3
470 $ cat .hgsub | grep -v sub3 > .hgsub1
471 $ mv .hgsub1 .hgsub
472 $ hg ci -m "remove sub3"
473
474 $ hg log -r "subrepo()" --style compact
475 0 7f491f53a367 1970-01-01 00:00 +0000 test
476 main import
477
478 1 ffe6649062fe 1970-01-01 00:00 +0000 test
479 deep nested modif should trigger a commit
480
481 2 9bb10eebee29 1970-01-01 00:00 +0000 test
482 add test.txt
483
484 3 7c64f035294f 1970-01-01 00:00 +0000 test
485 add large files
486
487 4 f734a59e2e35 1970-01-01 00:00 +0000 test
488 forget testing
489
490 11 9685a22af5db 1970-01-01 00:00 +0000 test
491 add sub3
492
493 12[tip] 2e0485b475b9 1970-01-01 00:00 +0000 test
494 remove sub3
495
496 $ hg log -r "subrepo('sub3')" --style compact
497 11 9685a22af5db 1970-01-01 00:00 +0000 test
498 add sub3
499
500 12[tip] 2e0485b475b9 1970-01-01 00:00 +0000 test
501 remove sub3
502
503 $ hg log -r "subrepo('bogus')" --style compact
504
505
506 Test .hgsubstate in the R state
507
508 $ hg rm .hgsub .hgsubstate
509 $ hg ci -m 'trash subrepo tracking'
510
511 $ hg log -r "subrepo('re:sub\d+')" --style compact
512 0 7f491f53a367 1970-01-01 00:00 +0000 test
513 main import
514
515 1 ffe6649062fe 1970-01-01 00:00 +0000 test
516 deep nested modif should trigger a commit
517
518 2 9bb10eebee29 1970-01-01 00:00 +0000 test
519 add test.txt
520
521 3 7c64f035294f 1970-01-01 00:00 +0000 test
522 add large files
523
524 4 f734a59e2e35 1970-01-01 00:00 +0000 test
525 forget testing
526
527 11 9685a22af5db 1970-01-01 00:00 +0000 test
528 add sub3
529
530 12 2e0485b475b9 1970-01-01 00:00 +0000 test
531 remove sub3
532
533 13[tip] a68b2c361653 1970-01-01 00:00 +0000 test
534 trash subrepo tracking
535
536
537 Restore the trashed subrepo tracking
538
539 $ hg rollback -q
540 $ hg update -Cq .
541
461 542 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now