##// END OF EJS Templates
dagop: extend filectxancestors() to walk multiple files
Author: Yuya Nishihara
Changeset: r35277:205c3c6c (default branch)
@@ -1,541 +1,543 b''
1 1 # dagop.py - graph ancestry and topology algorithm for revset
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11
12 12 from . import (
13 13 error,
14 14 mdiff,
15 15 node,
16 16 patch,
17 17 smartset,
18 18 )
19 19
20 20 baseset = smartset.baseset
21 21 generatorset = smartset.generatorset
22 22
23 23 # possible maximum depth between null and wdir()
24 24 _maxlogdepth = 0x80000000
25 25
def _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse):
    """Walk DAG using 'pfunc' from the given 'revs' nodes

    'pfunc(rev)' should return the parent/child revisions of the given 'rev'
    if 'reverse' is True/False respectively.

    Scan ends at the stopdepth (exclusive) if specified. Revisions found
    earlier than the startdepth are omitted.
    """
    if startdepth is None:
        startdepth = 0
    if stopdepth is None:
        stopdepth = _maxlogdepth
    if stopdepth == 0:
        return
    if stopdepth < 0:
        raise error.ProgrammingError('negative stopdepth')
    if reverse:
        heapsign = -1  # max heap
    else:
        heapsign = +1  # min heap

    # load input revs lazily to heap so earlier revisions can be yielded
    # without fully computing the input revs
    revs.sort(reverse)
    irevs = iter(revs)
    pendingheap = []  # [(heapsign * rev, depth), ...] (i.e. lower depth first)

    inputrev = next(irevs, None)
    if inputrev is not None:
        heapq.heappush(pendingheap, (heapsign * inputrev, 0))

    lastrev = None
    while pendingheap:
        currev, curdepth = heapq.heappop(pendingheap)
        currev = heapsign * currev
        # once the current input rev is reached, pull the next one onto the
        # heap so input revs interleave correctly with discovered parents
        if currev == inputrev:
            inputrev = next(irevs, None)
            if inputrev is not None:
                heapq.heappush(pendingheap, (heapsign * inputrev, 0))
        # rescan parents until curdepth >= startdepth because queued entries
        # of the same revision are iterated from the lowest depth
        foundnew = (currev != lastrev)
        if foundnew and curdepth >= startdepth:
            lastrev = currev
            yield currev
        pdepth = curdepth + 1
        if foundnew and pdepth < stopdepth:
            for prev in pfunc(currev):
                if prev != node.nullrev:
                    heapq.heappush(pendingheap, (heapsign * prev, pdepth))
77 77
def filectxancestors(fctxs, followfirst=False):
    """Like filectx.ancestors(), but can walk from multiple files/revisions,
    and includes the given fctxs themselves"""
    # pending file contexts, bucketed by revision number so we can always
    # emit the highest revision next
    visit = {}

    def schedule(fctx):
        visit.setdefault(fctx.rev(), set()).add(fctx)

    cut = 1 if followfirst else None

    for fctx in fctxs:
        schedule(fctx)
    while visit:
        rev = max(visit)
        bucket = visit[rev]
        fctx = bucket.pop()
        if not bucket:
            del visit[rev]
        yield fctx
        for parent in fctx.parents()[:cut]:
            schedule(parent)
101 103
def _genrevancestors(repo, revs, followfirst, startdepth, stopdepth, cutfunc):
    """Generator backing revancestors(); see that function for the contract"""
    if followfirst:
        cut = 1
    else:
        cut = None
    cl = repo.changelog
    def plainpfunc(rev):
        try:
            return cl.parentrevs(rev)[:cut]
        except error.WdirUnsupported:
            # the changelog has no entry for the working directory; fall
            # back to asking its context object for parents
            return (pctx.rev() for pctx in repo[rev].parents()[:cut])
    if cutfunc is None:
        pfunc = plainpfunc
    else:
        # drop cut revisions both from the starting set and from parent
        # expansion so traversal stops at the cut boundary
        pfunc = lambda rev: [r for r in plainpfunc(rev) if not cutfunc(r)]
        revs = revs.filter(lambda rev: not cutfunc(rev))
    return _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse=True)
119 121
def revancestors(repo, revs, followfirst=False, startdepth=None,
                 stopdepth=None, cutfunc=None):
    """Like revlog.ancestors(), but supports additional options, includes
    the given revs themselves, and returns a smartset

    Scan ends at the stopdepth (exclusive) if specified. Revisions found
    earlier than the startdepth are omitted.

    If cutfunc is provided, it will be used to cut the traversal of the DAG.
    When cutfunc(X) returns True, the DAG traversal stops - revision X and
    X's ancestors in the traversal path will be skipped. This could be an
    optimization sometimes.

    Note: if Y is an ancestor of X, cutfunc(X) returning True does not
    necessarily mean Y will also be cut. Usually cutfunc(Y) also wants to
    return True in this case. For example,

        D     # revancestors(repo, D, cutfunc=lambda rev: rev == B)
        |\    # will include "A", because the path D -> C -> A was not cut.
        B C   # If "B" gets cut, "A" might want to be cut too.
        |/
        A
    """
    gen = _genrevancestors(repo, revs, followfirst, startdepth, stopdepth,
                           cutfunc)
    # ancestors are produced in descending order
    return generatorset(gen, iterasc=False)
146 148
def _genrevdescendants(repo, revs, followfirst):
    """Generate descendants of 'revs' (inclusive) in revision number order"""
    if followfirst:
        cut = 1
    else:
        cut = None

    cl = repo.changelog
    first = revs.min()
    nullrev = node.nullrev
    if first == nullrev:
        # Are there nodes with a null first parent and a non-null
        # second one? Maybe. Do we care? Probably not.
        yield first
        for i in cl:
            yield i
    else:
        seen = set(revs)
        for i in cl.revs(first):
            if i in seen:
                yield i
                continue
            # a rev is a descendant iff one of its (first) parents is
            for x in cl.parentrevs(i)[:cut]:
                if x != nullrev and x in seen:
                    seen.add(i)
                    yield i
                    break
173 175
def _builddescendantsmap(repo, startrev, followfirst):
    """Build map of 'rev -> child revs', offset from startrev"""
    cl = repo.changelog
    nullrev = node.nullrev
    # index i holds the children of rev (startrev + i); py2 xrange keeps
    # this lazy for large repos
    descmap = [[] for _rev in xrange(startrev, len(cl))]
    for currev in cl.revs(startrev + 1):
        p1rev, p2rev = cl.parentrevs(currev)
        if p1rev >= startrev:
            descmap[p1rev - startrev].append(currev)
        if not followfirst and p2rev != nullrev and p2rev >= startrev:
            descmap[p2rev - startrev].append(currev)
    return descmap
186 188
def _genrevdescendantsofdepth(repo, revs, followfirst, startdepth, stopdepth):
    """Generate depth-limited descendants of 'revs' via _walkrevtree()"""
    base = revs.min()
    childmap = _builddescendantsmap(repo, base, followfirst)
    lookup = lambda rev: childmap[rev - base]
    return _walkrevtree(lookup, revs, startdepth, stopdepth, reverse=False)
193 195
def revdescendants(repo, revs, followfirst, startdepth=None, stopdepth=None):
    """Like revlog.descendants() but supports additional options, includes
    the given revs themselves, and returns a smartset

    Scan ends at the stopdepth (exclusive) if specified. Revisions found
    earlier than the startdepth are omitted.
    """
    if startdepth is None and stopdepth is None:
        # fast path: no need to build the full descendants map
        gen = _genrevdescendants(repo, revs, followfirst)
    else:
        gen = _genrevdescendantsofdepth(repo, revs, followfirst,
                                        startdepth, stopdepth)
    # descendants are produced in ascending order
    return generatorset(gen, iterasc=True)
207 209
def _reachablerootspure(repo, minroot, roots, heads, includepath):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>).

    Pure-Python fallback used when the changelog has no native
    reachableroots() implementation."""
    if not roots:
        return []
    parentrevs = repo.changelog.parentrevs
    roots = set(roots)
    visit = list(heads)
    reachable = set()
    seen = {}
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
            if not includepath:
                # only the root itself matters; don't walk past it
                continue
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    if not includepath:
        return reachable
    # second pass: mark every seen rev with a reachable parent, propagating
    # reachability forward (sorted order guarantees parents come first)
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return reachable
245 247
def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return baseset()
    minroot = roots.min()
    roots = list(roots)
    heads = list(heads)
    try:
        # prefer the changelog's native (C) implementation when available
        revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
    except AttributeError:
        revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
    revs = baseset(revs)
    revs.sort()
    return revs
262 264
def _changesrange(fctx1, fctx2, linerange2, diffopts):
    """Return `(diffinrange, linerange1)` where `diffinrange` is True
    if diff from fctx2 to fctx1 has changes in linerange2 and
    `linerange1` is the new line range for fctx1.
    """
    blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
    filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2)
    # '!' marks a changed block (as opposed to an unchanged '=' block)
    diffinrange = any(stype == '!' for _, stype in filteredblocks)
    return diffinrange, linerange1
272 274
def blockancestors(fctx, fromline, toline, followfirst=False):
    """Yield ancestors of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.

    Each yielded value is a `(filectx, (fromline, toline))` pair giving the
    block's location in that ancestor.
    """
    diffopts = patch.diffopts(fctx._repo.ui)
    fctx = fctx.introfilectx()
    visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
    while visit:
        # process the highest (linkrev, filenode) pending entry first
        c, linerange2 = visit.pop(max(visit))
        pl = c.parents()
        if followfirst:
            pl = pl[:1]
        if not pl:
            # The block originates from the initial revision.
            yield c, linerange2
            continue
        inrange = False
        for p in pl:
            inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
            inrange = inrange or inrangep
            if linerange1[0] == linerange1[1]:
                # Parent's linerange is empty, meaning that the block got
                # introduced in this revision; no need to go further in this
                # branch.
                continue
            # Set _descendantrev with 'c' (a known descendant) so that, when
            # _adjustlinkrev is called for 'p', it receives this descendant
            # (as srcrev) instead possibly topmost introrev.
            p._descendantrev = c.rev()
            visit[p.linkrev(), p.filenode()] = p, linerange1
        if inrange:
            yield c, linerange2
305 307
def blockdescendants(fctx, fromline, toline):
    """Yield descendants of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.

    Each yielded value is a `(filectx, (fromline, toline))` pair, like
    blockancestors() but walking forward in the filelog.
    """
    # First possibly yield 'fctx' if it has changes in range with respect to
    # its parents.
    try:
        c, linerange1 = next(blockancestors(fctx, fromline, toline))
    except StopIteration:
        pass
    else:
        if c == fctx:
            yield c, linerange1

    diffopts = patch.diffopts(fctx._repo.ui)
    fl = fctx.filelog()
    seen = {fctx.filerev(): (fctx, (fromline, toline))}
    for i in fl.descendants([fctx.filerev()]):
        c = fctx.filectx(i)
        inrange = False
        for x in fl.parentrevs(i):
            try:
                p, linerange2 = seen[x]
            except KeyError:
                # nullrev or other branch
                continue
            inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts)
            inrange = inrange or inrangep
            # If revision 'i' has been seen (it's a merge) and the line range
            # previously computed differs from the one we just got, we take the
            # surrounding interval. This is conservative but avoids losing
            # information.
            if i in seen and seen[i][1] != linerange1:
                lbs, ubs = zip(linerange1, seen[i][1])
                linerange1 = min(lbs), max(ubs)
            seen[i] = c, linerange1
        if inrange:
            yield c, linerange1
344 346
def toposort(revs, parentsfunc, firstbranch=()):
    """Yield revisions from heads to roots one (topo) branch at a time.

    This function aims to be used by a graph generator that wishes to minimize
    the number of parallel branches and their interleaving.

    Example iteration order (numbers show the "true" order in a changelog):

      o  4
      |
      o  1
      |
      | o  3
      | |
      | o  2
      |/
      o  0

    Note that the ancestors of merges are understood by the current
    algorithm to be on the same branch. This means no reordering will
    occur behind a merge.
    """

    ### Quick summary of the algorithm
    #
    # This function is based around a "retention" principle. We keep revisions
    # in memory until we are ready to emit a whole branch that immediately
    # "merges" into an existing one. This reduces the number of parallel
    # branches with interleaved revisions.
    #
    # During iteration revs are split into two groups:
    # A) revision already emitted
    # B) revision in "retention". They are stored as different subgroups.
    #
    # for each REV, we do the following logic:
    #
    #   1) if REV is a parent of (A), we will emit it. If there is a
    #   retention group ((B) above) that is blocked on REV being
    #   available, we emit all the revisions out of that retention
    #   group first.
    #
    #   2) else, we'll search for a subgroup in (B) awaiting for REV to be
    #   available, if such subgroup exist, we add REV to it and the subgroup is
    #   now awaiting for REV.parents() to be available.
    #
    #   3) finally if no such group existed in (B), we create a new subgroup.
    #
    #
    # To bootstrap the algorithm, we emit the tipmost revision (which
    # puts it in group (A) from above).

    revs.sort(reverse=True)

    # Set of parents of revision that have been emitted. They can be considered
    # unblocked as the graph generator is already aware of them so there is no
    # need to delay the revisions that reference them.
    #
    # If someone wants to prioritize a branch over the others, pre-filling this
    # set will force all other branches to wait until this branch is ready to
    # be emitted.
    unblocked = set(firstbranch)

    # list of groups waiting to be displayed, each group is defined by:
    #
    #   (revs:    lists of revs waiting to be displayed,
    #    blocked: set of that cannot be displayed before those in 'revs')
    #
    # The second value ('blocked') correspond to parents of any revision in
    # the group ('revs') that is not itself contained in the group. The main
    # idea of this algorithm is to delay as much as possible the emission of
    # any revision.  This means waiting for the moment we are about to display
    # these parents to display the revs in a group.
    #
    # This first implementation is smart until it encounters a merge: it will
    # emit revs as soon as any parent is about to be emitted and can grow an
    # arbitrary number of revs in 'blocked'. In practice this mean we properly
    # retains new branches but gives up on any special ordering for ancestors
    # of merges. The implementation can be improved to handle this better.
    #
    # The first subgroup is special. It corresponds to all the revision that
    # were already emitted. The 'revs' lists is expected to be empty and the
    # 'blocked' set contains the parents revisions of already emitted revision.
    #
    # You could pre-seed the <parents> set of groups[0] to a specific
    # changesets to select what the first emitted branch should be.
    groups = [([], unblocked)]
    pendingheap = []
    pendingset = set()

    heapq.heapify(pendingheap)
    heappop = heapq.heappop
    heappush = heapq.heappush
    for currentrev in revs:
        # Heap works with smallest element, we want highest so we invert
        if currentrev not in pendingset:
            heappush(pendingheap, -currentrev)
            pendingset.add(currentrev)
        # iterates on pending rev until after the current rev have been
        # processed.
        rev = None
        while rev != currentrev:
            rev = -heappop(pendingheap)
            pendingset.remove(rev)

            # Seek for a subgroup blocked, waiting for the current revision.
            matching = [i for i, g in enumerate(groups) if rev in g[1]]

            if matching:
                # The main idea is to gather together all sets that are
                # blocked on the same revision.
                #
                # Groups are merged when a common blocking ancestor is
                # observed. For example, given two groups:
                #
                # revs [5, 4] waiting for 1
                # revs [3, 2] waiting for 1
                #
                # These two groups will be merged when we process
                # 1. In theory, we could have merged the groups when
                # we added 2 to the group it is now in (we could have
                # noticed the groups were both blocked on 1 then), but
                # the way it works now makes the algorithm simpler.
                #
                # We also always keep the oldest subgroup first. We can
                # probably improve the behavior by having the longest set
                # first. That way, graph algorithms could minimise the length
                # of parallel lines their drawing. This is currently not done.
                targetidx = matching.pop(0)
                trevs, tparents = groups[targetidx]
                for i in matching:
                    gr = groups[i]
                    trevs.extend(gr[0])
                    tparents |= gr[1]
                # delete all merged subgroups (except the one we kept)
                # (starting from the last subgroup for performance and
                # sanity reasons)
                for i in reversed(matching):
                    del groups[i]
            else:
                # This is a new head. We create a new subgroup for it.
                targetidx = len(groups)
                groups.append(([], {rev}))

            gr = groups[targetidx]

            # We now add the current nodes to this subgroups. This is done
            # after the subgroup merging because all elements from a subgroup
            # that relied on this rev must precede it.
            #
            # we also update the <parents> set to include the parents of the
            # new nodes.
            if rev == currentrev:  # only display stuff in rev
                gr[0].append(rev)
            gr[1].remove(rev)
            parents = [p for p in parentsfunc(rev) if p > node.nullrev]
            gr[1].update(parents)
            for p in parents:
                if p not in pendingset:
                    pendingset.add(p)
                    heappush(pendingheap, -p)

            # Look for a subgroup to display
            #
            # When unblocked is empty (if clause), we were not waiting for any
            # revisions during the first iteration (if no priority was given)
            # or if we emitted a whole disconnected set of the graph (reached
            # a root).  In that case we arbitrarily take the oldest known
            # subgroup. The heuristic could probably be better.
            #
            # Otherwise (elif clause) if the subgroup is blocked on
            # a revision we just emitted, we can safely emit it as
            # well.
            if not unblocked:
                if len(groups) > 1:  # display other subset
                    targetidx = 1
                    gr = groups[1]
            elif not gr[1] & unblocked:
                gr = None

            if gr is not None:
                # update the set of awaited revisions with the one from the
                # subgroup
                unblocked |= gr[1]
                # output all revisions in the subgroup
                for r in gr[0]:
                    yield r
                # delete the subgroup that you just output
                # unless it is groups[0] in which case you just empty it.
                if targetidx:
                    del groups[targetidx]
                else:
                    gr[0][:] = []
    # Check if we have some subgroup waiting for revisions we are not going to
    # iterate over
    for g in groups:
        for r in g[0]:
            yield r
@@ -1,2224 +1,2222 b''
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import re
11 11
12 12 from .i18n import _
13 13 from . import (
14 14 dagop,
15 15 destutil,
16 16 encoding,
17 17 error,
18 18 hbisect,
19 19 match as matchmod,
20 20 node,
21 21 obsolete as obsmod,
22 22 obsutil,
23 23 pathutil,
24 24 phases,
25 25 registrar,
26 26 repoview,
27 27 revsetlang,
28 28 scmutil,
29 29 smartset,
30 30 util,
31 31 )
32 32
33 33 # helpers for processing parsed tree
34 34 getsymbol = revsetlang.getsymbol
35 35 getstring = revsetlang.getstring
36 36 getinteger = revsetlang.getinteger
37 37 getboolean = revsetlang.getboolean
38 38 getlist = revsetlang.getlist
39 39 getrange = revsetlang.getrange
40 40 getargs = revsetlang.getargs
41 41 getargsdict = revsetlang.getargsdict
42 42
43 43 baseset = smartset.baseset
44 44 generatorset = smartset.generatorset
45 45 spanset = smartset.spanset
46 46 fullreposet = smartset.fullreposet
47 47
48 48 # Constants for ordering requirement, used in getset():
49 49 #
50 50 # If 'define', any nested functions and operations MAY change the ordering of
51 51 # the entries in the set (but if changes the ordering, it MUST ALWAYS change
52 52 # it). If 'follow', any nested functions and operations MUST take the ordering
53 53 # specified by the first operand to the '&' operator.
54 54 #
55 55 # For instance,
56 56 #
57 57 # X & (Y | Z)
58 58 # ^ ^^^^^^^
59 59 # | follow
60 60 # define
61 61 #
62 62 # will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
63 63 # of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
64 64 #
65 65 # 'any' means the order doesn't matter. For instance,
66 66 #
67 67 # (X & !Y) | ancestors(Z)
68 68 # ^ ^
69 69 # any any
70 70 #
71 71 # For 'X & !Y', 'X' decides the order and 'Y' is subtracted from 'X', so the
72 72 # order of 'Y' does not matter. For 'ancestors(Z)', Z's order does not matter
73 73 # since 'ancestors' does not care about the order of its argument.
74 74 #
75 75 # Currently, most revsets do not care about the order, so 'define' is
76 76 # equivalent to 'follow' for them, and the resulting order is based on the
77 77 # 'subset' parameter passed down to them:
78 78 #
79 79 # m = revset.match(...)
80 80 # m(repo, subset, order=defineorder)
81 81 # ^^^^^^
82 82 # For most revsets, 'define' means using the order this subset provides
83 83 #
84 84 # There are a few revsets that always redefine the order if 'define' is
85 85 # specified: 'sort(X)', 'reverse(X)', 'x:y'.
86 86 anyorder = 'any' # don't care the order, could be even random-shuffled
87 87 defineorder = 'define' # ALWAYS redefine, or ALWAYS follow the current order
88 88 followorder = 'follow' # MUST follow the current order
89 89
90 90 # helpers
91 91
def getset(repo, subset, x, order=defineorder):
    """Evaluate parsed tree 'x' against 'subset' and return a smartset"""
    if not x:
        raise error.ParseError(_("missing argument"))
    # dispatch on the node type (first element of the parse tuple)
    return methods[x[0]](repo, subset, *x[1:], order=order)
96 96
def _getrevsource(repo, r):
    """Return the rev this changeset was grafted/transplanted/rebased from,
    or None if no such source is recorded (or no longer resolvable)"""
    extra = repo[r].extra()
    for label in ('source', 'transplant_source', 'rebase_source'):
        if label not in extra:
            continue
        try:
            return repo[extra[label]].rev()
        except error.RepoLookupError:
            pass
    return None
106 106
107 107 # operator methods
108 108
def stringset(repo, subset, x, order):
    """Resolve a plain string/symbol to a single-revision set"""
    x = scmutil.intrev(repo[x])
    # nullrev is only visible through a fullreposet
    if (x in subset
        or x == node.nullrev and isinstance(subset, fullreposet)):
        return baseset([x])
    return baseset()
115 115
def rangeset(repo, subset, x, y, order):
    """'x:y' operator: revisions from the first of x to the last of y"""
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    return _makerangeset(repo, subset, m.first(), n.last(), order)
123 123
def rangeall(repo, subset, x, order):
    """':' operator: all revisions (x is always None for this operator)"""
    assert x is None
    return _makerangeset(repo, subset, 0, len(repo) - 1, order)
127 127
def rangepre(repo, subset, y, order):
    """':y' operator: revisions from 0 to the last of y"""
    # ':y' can't be rewritten to '0:y' since '0' may be hidden
    n = getset(repo, fullreposet(repo), y)
    if not n:
        return baseset()
    return _makerangeset(repo, subset, 0, n.last(), order)
134 134
def rangepost(repo, subset, x, order):
    """'x:' operator: revisions from the first of x to the tip"""
    m = getset(repo, fullreposet(repo), x)
    if not m:
        return baseset()
    return _makerangeset(repo, subset, m.first(), len(repo) - 1, order)
140 140
def _makerangeset(repo, subset, m, n, order):
    """Build the inclusive rev range m..n (descending if m > n) restricted
    to 'subset', honoring the requested ordering"""
    if m == n:
        r = baseset([m])
    elif n == node.wdirrev:
        # wdir() sorts after all real revisions
        r = spanset(repo, m, len(repo)) + baseset([n])
    elif m == node.wdirrev:
        # descending range starting at wdir()
        r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
    elif m < n:
        r = spanset(repo, m, n + 1)
    else:
        r = spanset(repo, m, n - 1)

    if order == defineorder:
        return r & subset
    else:
        # carrying the sorting over when possible would be more efficient
        return subset & r
158 158
def dagrange(repo, subset, x, y, order):
    """'x::y' operator: DAG range, i.e. descendants of x that are
    ancestors of y (inclusive)"""
    r = fullreposet(repo)
    xs = dagop.reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
                              includepath=True)
    return subset & xs
164 164
def andset(repo, subset, x, y, order):
    """'x and y' operator: evaluate y within the result of x; y must
    follow x's ordering unless no order was requested at all"""
    yorder = anyorder if order == anyorder else followorder
    return getset(repo, getset(repo, subset, x, order), y, yorder)
171 171
def andsmallyset(repo, subset, x, y, order):
    # 'andsmally(x, y)' is equivalent to 'and(x, y)', but faster when y is
    # small: evaluate y first so x only filters the narrow result
    yorder = anyorder if order == anyorder else followorder
    return getset(repo, getset(repo, subset, y, yorder), x, order)
179 179
def differenceset(repo, subset, x, y, order):
    """'x - y' operator: revisions in x but not in y (y's order is moot)"""
    return getset(repo, subset, x, order) - getset(repo, subset, y, anyorder)
182 182
def _orsetlist(repo, subset, xs, order):
    """Union the sets in 'xs' by recursive halving (balanced addition)"""
    assert xs
    if len(xs) == 1:
        return getset(repo, subset, xs[0], order)
    p = len(xs) // 2
    a = _orsetlist(repo, subset, xs[:p], order)
    b = _orsetlist(repo, subset, xs[p:], order)
    return a + b
191 191
def orset(repo, subset, x, order):
    """'x or y or ...' operator"""
    xs = getlist(x)
    if order == followorder:
        # slow path to take the subset order
        return subset & _orsetlist(repo, fullreposet(repo), xs, anyorder)
    else:
        return _orsetlist(repo, subset, xs, order)
199 199
def notset(repo, subset, x, order):
    """'not x' operator: subset minus x"""
    return subset - getset(repo, subset, x, anyorder)
202 202
def relationset(repo, subset, x, y, order):
    """'x#y' operator: only valid when subscripted, so always an error here"""
    raise error.ParseError(_("can't use a relation in this context"))
205 205
def relsubscriptset(repo, subset, x, y, z, order):
    """'x#y[z]' operator: apply relation 'y' to 'x' at generation 'z'"""
    # this is pretty basic implementation of 'x#y[z]' operator, still
    # experimental so undocumented. see the wiki for further ideas.
    # https://www.mercurial-scm.org/wiki/RevsetOperatorPlan
    rel = getsymbol(y)
    n = getinteger(z, _("relation subscript must be an integer"))

    # TODO: perhaps this should be a table of relation functions
    if rel in ('g', 'generations'):
        # TODO: support range, rewrite tests, and drop startdepth argument
        # from ancestors() and descendants() predicates
        if n <= 0:
            # negative generations look towards descendants
            n = -n
            return _ancestors(repo, subset, x, startdepth=n, stopdepth=n + 1)
        else:
            return _descendants(repo, subset, x, startdepth=n, stopdepth=n + 1)

    raise error.UnknownIdentifier(rel, ['generations'])
224 224
def subscriptset(repo, subset, x, y, order):
    """'x[y]' operator: only valid on a relation, so always an error here"""
    raise error.ParseError(_("can't use a subscript in this context"))
227 227
def listset(repo, subset, *xs, **opts):
    """'x, y, ...' list syntax: invalid outside function arguments"""
    raise error.ParseError(_("can't use a list in this context"),
                           hint=_('see hg help "revsets.x or y"'))
231 231
def keyvaluepair(repo, subset, k, v, order):
    """'k=v' syntax: only valid inside function argument lists"""
    raise error.ParseError(_("can't use a key-value pair in this context"))
234 234
def func(repo, subset, a, b, order):
    """Dispatch a revset function call 'a(b)' to its registered predicate"""
    f = getsymbol(a)
    if f in symbols:
        func = symbols[f]
        # some predicates need to know the requested ordering
        if getattr(func, '_takeorder', False):
            return func(repo, subset, b, order)
        return func(repo, subset, b)

    # unknown name: suggest documented (public) predicates only
    keep = lambda fn: getattr(fn, '__doc__', None) is not None

    syms = [s for (s, fn) in symbols.items() if keep(fn)]
    raise error.UnknownIdentifier(f, syms)
247 247
248 248 # functions
249 249
250 250 # symbols are callables like:
251 251 # fn(repo, subset, x)
252 252 # with:
253 253 # repo - current repository instance
254 254 # subset - of revisions to be examined
255 255 # x - argument in tree form
256 256 symbols = revsetlang.symbols
257 257
258 258 # symbols which can't be used for a DoS attack for any given input
259 259 # (e.g. those which accept regexes as plain strings shouldn't be included)
260 260 # functions that just return a lot of changesets (like all) don't count here
261 261 safesymbols = set()
262 262
263 263 predicate = registrar.revsetpredicate()
264 264
@predicate('_destupdate')
def _destupdate(repo, subset, x):
    # experimental revset for update destination
    args = getargsdict(x, 'limit', 'clean')
    return subset & baseset([destutil.destupdate(repo, **args)[0]])
270 270
@predicate('_destmerge')
def _destmerge(repo, subset, x):
    # experimental revset for merge destination
    sourceset = None
    if x is not None:
        sourceset = getset(repo, fullreposet(repo), x)
    return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
278 278
@predicate('adds(pattern)', safe=True, weight=30)
def adds(repo, subset, x):
    """Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
    # status field 1 is the list of added files
    return checkstatus(repo, subset, pat, 1)
290 290
@predicate('ancestor(*changeset)', safe=True, weight=0.5)
def ancestor(repo, subset, x):
    """A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # (getset(repo, rl, i) for i in l) generates a list of lists
    for revs in (getset(repo, rl, i) for i in l):
        for r in revs:
            if anc is None:
                anc = repo[r]
            else:
                # fold pairwise: gca is associative
                anc = anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()
315 315
def _ancestors(repo, subset, x, followfirst=False, startdepth=None,
               stopdepth=None):
    """Helper computing the ancestors of the set 'x'; shared by the
    ancestors()/_firstancestors() predicates."""
    heads = getset(repo, fullreposet(repo), x)
    if heads:
        ancs = dagop.revancestors(repo, heads, followfirst, startdepth,
                                  stopdepth)
        return subset & ancs
    return baseset()
323 323
@predicate('ancestors(set[, depth])', safe=True)
def ancestors(repo, subset, x):
    """Changesets that are ancestors of changesets in set, including the
    given changesets themselves.

    If depth is specified, the result only includes changesets up to
    the specified generation.
    """
    # startdepth is for internal use only until we can decide the UI
    args = getargsdict(x, 'ancestors', 'set depth startdepth')
    if 'set' not in args:
        # i18n: "ancestors" is a keyword
        raise error.ParseError(_('ancestors takes at least 1 argument'))
    startdepth = stopdepth = None
    if 'startdepth' in args:
        # messages deliberately untranslated: startdepth is internal-only
        n = getinteger(args['startdepth'],
                       "ancestors expects an integer startdepth")
        if n < 0:
            raise error.ParseError("negative startdepth")
        startdepth = n
    if 'depth' in args:
        # i18n: "ancestors" is a keyword
        n = getinteger(args['depth'], _("ancestors expects an integer depth"))
        if n < 0:
            raise error.ParseError(_("negative depth"))
        # stopdepth is exclusive, so depth=n means "up to n generations back"
        stopdepth = n + 1
    return _ancestors(repo, subset, args['set'],
                      startdepth=startdepth, stopdepth=stopdepth)
352 352
@predicate('_firstancestors', safe=True)
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    # (no docstring on purpose: internal predicate, hidden from help)
    return _ancestors(repo, subset, x, followfirst=True)
358 358
def _childrenspec(repo, subset, x, n, order):
    """Changesets that are the Nth child of a changeset
    in set.
    """
    cs = set()
    for r in getset(repo, fullreposet(repo), x):
        for i in range(n):
            c = repo[r].children()
            if len(c) == 0:
                # fewer than n generations of descendants: no Nth child
                break
            if len(c) > 1:
                # the Nth child is only well-defined along a linear run
                raise error.RepoLookupError(
                    _("revision in set has more than one child"))
            r = c[0].rev()
        else:
            # walked n generations without breaking: r is the Nth child
            cs.add(r)
    return subset & cs
376 376
def ancestorspec(repo, subset, x, n, order):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    n = getinteger(n, _("~ expects a number"))
    if n < 0:
        # a negative count walks towards children instead
        return _childrenspec(repo, subset, x, -n, order)
    cl = repo.changelog
    ps = set()
    for r in getset(repo, fullreposet(repo), x):
        cur = r
        for _i in range(n):
            try:
                cur = cl.parentrevs(cur)[0]
            except error.WdirUnsupported:
                # the working directory has no changelog entry
                cur = repo[cur].parents()[0].rev()
        ps.add(cur)
    return subset & ps
396 396
@predicate('author(string)', safe=True, weight=10)
def author(repo, subset, x):
    """Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    pat = getstring(x, _("author requires a string"))
    kind, pattern, matcher = _substringmatcher(pat, casesensitive=False)

    def hasauthor(r):
        return matcher(repo[r].user())

    return subset.filter(hasauthor, condrepr=('<user %r>', pat))
406 406
@predicate('bisect(string)', safe=True)
def bisect(repo, subset, x):
    """Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads`` : csets topologically good/bad
    - ``range`` : csets taking part in the bisection
    - ``pruned`` : csets that are goods, bads or skipped
    - ``untested`` : csets whose fate is yet unknown
    - ``ignored`` : csets ignored due to DAG topology
    - ``current`` : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    # hbisect.get() resolves the status name to the matching revisions
    state = set(hbisect.get(repo, status))
    return subset & state
423 423
# Backward-compatibility
# - no help entry so that we do not advertise it any more
@predicate('bisected', safe=True)
def bisected(repo, subset, x):
    # legacy alias for bisect(); intentionally undocumented
    return bisect(repo, subset, x)
429 429
@predicate('bookmark([name])', safe=True)
def bookmark(repo, subset, x):
    """The named bookmark or all bookmarks.

    Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = util.stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            # a plain name must match an existing bookmark exactly
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % pattern)
            bms.add(repo[bmrev].rev())
        else:
            # pattern kinds (re:, glob:, ...) may match several bookmarks
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        # no argument: every bookmarked revision
        bms = {repo[r].rev() for r in repo._bookmarks.values()}
    # drop nullrev if it slipped in
    bms -= {node.nullrev}
    return subset & bms
464 464
@predicate('branch(string or set)', safe=True, weight=10)
def branch(repo, subset, x):
    """
    All changesets belonging to the given branch or the branches of the given
    changesets.

    Pattern matching is supported for `string`. See
    :hg:`help revisions.patterns`.
    """
    getbi = repo.revbranchcache().branchinfo
    def getbranch(r):
        try:
            return getbi(r)[0]
        except error.WdirUnsupported:
            # the branch cache cannot answer for the working directory
            return repo[r].branch()

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = util.stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists and pattern kind is not specified explicitly
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbranch(r)),
                                     condrepr=('<branch %r>', b))
            if b.startswith('literal:'):
                raise error.RepoLookupError(_("branch '%s' does not exist")
                                            % pattern)
        else:
            return subset.filter(lambda r: matcher(getbranch(r)),
                                 condrepr=('<branch %r>', b))

    # fall back: treat the argument as a revset and collect its branches
    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbranch(r))
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbranch(r) in b,
                         condrepr=lambda: '<branch %r>' % sorted(b))
508 508
@predicate('bumped()', safe=True)
def bumped(repo, subset, x):
    # deprecated alias; no docstring on purpose so it stays out of help
    msg = ("'bumped()' is deprecated, "
           "use 'phasedivergent()'")
    repo.ui.deprecwarn(msg, '4.4')

    return phasedivergent(repo, subset, x)
516 516
@predicate('phasedivergent()', safe=True)
def phasedivergent(repo, subset, x):
    """Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `phasedivergent`.
    (EXPERIMENTAL)
    """
    # i18n: "phasedivergent" is a keyword
    getargs(x, 0, 0, _("phasedivergent takes no arguments"))
    # the obsolescence machinery precomputes this revision set
    phasedivergent = obsmod.getrevs(repo, 'phasedivergent')
    return subset & phasedivergent
528 528
@predicate('bundle()', safe=True)
def bundle(repo, subset, x):
    """Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        # only a bundle repository's changelog carries 'bundlerevs'
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        raise error.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs
540 540
def checkstatus(repo, subset, pat, field):
    """Filter 'subset' to changesets where a file matching 'pat' shows up in
    the given field of the status against the first parent.

    'field' indexes the repo.status() result tuple (adds() passes 1).
    """
    hasset = matchmod.patkind(pat) == 'set'

    # cache the matcher across revisions, except for fileset patterns,
    # which must be re-evaluated against each changeset
    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        # fast path: a bare filename can be tested by plain membership
        if not m.anypats() and len(m.files()) == 1:
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        # only run the (expensive) status when the file was touched at all;
        # note: falls off the end (returns None, i.e. falsy) on no match
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
572 572
def _children(repo, subset, parentset):
    """Return a baseset of members of 'subset' having a parent in
    'parentset'."""
    if not parentset:
        return baseset()
    children = set()
    pr = repo.changelog.parentrevs
    minrev = parentset.min()
    nullrev = node.nullrev
    for r in subset:
        # no revision at or below the smallest parent can be a child
        if r <= minrev:
            continue
        p1, p2 = pr(r)
        if p1 in parentset or (p2 != nullrev and p2 in parentset):
            children.add(r)
    return baseset(children)
589 589
@predicate('children(set)', safe=True)
def children(repo, subset, x):
    """Child changesets of changesets in set.
    """
    parentset = getset(repo, fullreposet(repo), x)
    return subset & _children(repo, subset, parentset)
597 597
@predicate('closed()', safe=True, weight=10)
def closed(repo, subset, x):
    """Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))

    def isclosed(r):
        return repo[r].closesbranch()

    return subset.filter(isclosed, condrepr='<branch closed>')
606 606
@predicate('contains(pattern)', weight=100)
def contains(repo, subset, x):
    """The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            # plain path: a direct manifest lookup is far cheaper than
            # scanning every manifest entry with a matcher
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches, condrepr=('<contains %r>', pat))
633 633
@predicate('converted([id])', safe=True)
def converted(repo, subset, x):
    """Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        # convert writes the source revision into the 'convert_revision' extra
        source = repo[r].extra().get('convert_revision', None)
        return source is not None and (rev is None or source.startswith(rev))

    # pass the predicate directly; the old lambda wrapper added nothing
    return subset.filter(_matchvalue,
                         condrepr=('<converted %r>', rev))
656 656
@predicate('date(interval)', safe=True, weight=10)
def date(repo, subset, x):
    """Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    dm = util.matchdate(ds)

    def indate(r):
        # compare only the timestamp component of the changeset date
        return dm(repo[r].date()[0])

    return subset.filter(indate, condrepr=('<date %r>', ds))
666 666
@predicate('desc(string)', safe=True, weight=10)
def desc(repo, subset, x):
    """Search commit message for string. The match is case-insensitive.

    Pattern matching is supported for `string`. See
    :hg:`help revisions.patterns`.
    """
    # i18n: "desc" is a keyword
    ds = getstring(x, _("desc requires a string"))
    kind, pattern, matcher = _substringmatcher(ds, casesensitive=False)

    def hasdesc(r):
        return matcher(repo[r].description())

    return subset.filter(hasdesc, condrepr=('<desc %r>', ds))
681 681
def _descendants(repo, subset, x, followfirst=False, startdepth=None,
                 stopdepth=None):
    """Helper computing the descendants of the set 'x'; shared by the
    descendants()/_firstdescendants() predicates."""
    roots = getset(repo, fullreposet(repo), x)
    if roots:
        descs = dagop.revdescendants(repo, roots, followfirst, startdepth,
                                     stopdepth)
        return subset & descs
    return baseset()
689 689
@predicate('descendants(set[, depth])', safe=True)
def descendants(repo, subset, x):
    """Changesets which are descendants of changesets in set, including the
    given changesets themselves.

    If depth is specified, the result only includes changesets up to
    the specified generation.
    """
    # startdepth is for internal use only until we can decide the UI
    args = getargsdict(x, 'descendants', 'set depth startdepth')
    if 'set' not in args:
        # i18n: "descendants" is a keyword
        raise error.ParseError(_('descendants takes at least 1 argument'))
    startdepth = stopdepth = None
    if 'startdepth' in args:
        # messages deliberately untranslated: startdepth is internal-only
        n = getinteger(args['startdepth'],
                       "descendants expects an integer startdepth")
        if n < 0:
            raise error.ParseError("negative startdepth")
        startdepth = n
    if 'depth' in args:
        # i18n: "descendants" is a keyword
        n = getinteger(args['depth'], _("descendants expects an integer depth"))
        if n < 0:
            raise error.ParseError(_("negative depth"))
        # stopdepth is exclusive, so depth=n means "up to n generations ahead"
        stopdepth = n + 1
    return _descendants(repo, subset, args['set'],
                        startdepth=startdepth, stopdepth=stopdepth)
718 718
@predicate('_firstdescendants', safe=True)
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    # (no docstring on purpose: internal predicate, hidden from help)
    return _descendants(repo, subset, x, followfirst=True)
724 724
@predicate('destination([set])', safe=True, weight=10)
def destination(repo, subset, x):
    """Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source. Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be. Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as
    # transitive grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None

        # walk r's chain of recorded sources until it ends or hits a match
        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the
            # arg set. Since every candidate dest is visited by way of
            # iterating subset, any dests further back in the lineage will be
            # tested by a different iteration over subset. Likewise, if the
            # src was already selected, the current lineage can be selected
            # without going back further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__,
                         condrepr=lambda: '<destination %r>' % sorted(dests))
769 769
@predicate('divergent()', safe=True)
def divergent(repo, subset, x):
    # deprecated alias; no docstring on purpose so it stays out of help
    msg = ("'divergent()' is deprecated, "
           "use 'contentdivergent()'")
    repo.ui.deprecwarn(msg, '4.4')

    return contentdivergent(repo, subset, x)
777 777
@predicate('contentdivergent()', safe=True)
def contentdivergent(repo, subset, x):
    """
    Final successors of changesets with an alternative set of final
    successors. (EXPERIMENTAL)
    """
    # i18n: "contentdivergent" is a keyword
    getargs(x, 0, 0, _("contentdivergent takes no arguments"))
    # the obsolescence machinery precomputes this revision set
    contentdivergent = obsmod.getrevs(repo, 'contentdivergent')
    return subset & contentdivergent
788 788
@predicate('extdata(source)', safe=False, weight=100)
def extdata(repo, subset, x):
    """Changesets in the specified extdata source. (EXPERIMENTAL)"""
    # i18n: "extdata" is a keyword
    args = getargsdict(x, 'extdata', 'source')
    source = getstring(args.get('source'),
                       # i18n: "extdata" is a keyword
                       _('extdata takes at least 1 string argument'))
    # extdatasource resolves the named source to revision data
    data = scmutil.extdatasource(repo, source)
    return subset & baseset(data)
799 799
@predicate('extinct()', safe=True)
def extinct(repo, subset, x):
    """Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    # the obsolescence machinery precomputes this revision set
    extincts = obsmod.getrevs(repo, 'extinct')
    return subset & extincts
808 808
@predicate('extra(label, [value])', safe=True)
def extra(repo, subset, x):
    """Changesets with the given label in the extra metadata, with the given
    optional value.

    Pattern matching is supported for `value`. See
    :hg:`help revisions.patterns`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        kind, value, matcher = util.stringmatcher(value)

    def _matchvalue(r):
        extra = repo[r].extra()
        # with no value argument, any value for the label matches
        return label in extra and (value is None or matcher(extra[label]))

    # pass the predicate directly; the old lambda wrapper added nothing
    return subset.filter(_matchvalue,
                         condrepr=('<extra[%r] %r>', label, value))
838 838
@predicate('filelog(pattern)', safe=True)
def filelog(repo, subset, x):
    """Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        # plain path: a single filelog to visit
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        # pattern: every working-directory file matching it
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        fl = repo.file(f)
        # known: filenode -> changelog rev already resolved by the slow scan
        known = {}
        scanpos = 0
        for fr in list(fl):
            fn = fl.node(fr)
            if fn in known:
                s.add(known[fn])
                continue

            lr = fl.linkrev(fr)
            if lr in cl:
                s.add(lr)
            elif scanpos is not None:
                # lowest matching changeset is filtered, scan further
                # ahead in changelog
                start = max(lr, scanpos) + 1
                scanpos = None
                for r in cl.revs(start):
                    # minimize parsing of non-matching entries
                    if f in cl.revision(r) and f in cl.readfiles(r):
                        try:
                            # try to use manifest delta fastpath
                            n = repo[r].filenode(f)
                            if n not in known:
                                if n == fn:
                                    s.add(r)
                                    scanpos = r
                                    break
                                else:
                                    known[n] = r
                        except error.ManifestLookupError:
                            # deletion in changelog
                            continue

    return subset & s
904 904
@predicate('first(set, [n])', safe=True, takeorder=True, weight=0)
def first(repo, subset, x):
    """An alias for limit().
    """
    # shares limit()'s argument handling and ordering behavior
    return limit(repo, subset, x, order)
910 910
def _follow(repo, subset, x, name, followfirst=False):
    """Common implementation of the follow()/_followfirst() predicates.

    Without a pattern argument this yields the ancestors of the working
    directory's first parent (or of the given start revision). With a
    pattern, the histories of all matching files are followed at once via
    dagop.filectxancestors(). The SOURCE scrape interleaved the pre-change
    per-file loop with the post-change multi-file body; this is the
    post-change version.
    """
    l = getargs(x, 0, 2, _("%s takes no arguments or a pattern "
                           "and an optional revset") % name)
    c = repo['.']
    if l:
        x = getstring(l[0], _("%s expected a pattern") % name)
        rev = None
        if len(l) >= 2:
            revs = getset(repo, fullreposet(repo), l[1])
            if len(revs) != 1:
                raise error.RepoLookupError(
                        _("%s expected one starting revision") % name)
            rev = revs.last()
            c = repo[rev]
        matcher = matchmod.match(repo.root, repo.getcwd(), [x],
                                 ctx=repo[rev], default='path')

        files = c.manifest().walk(matcher)

        # follow all matching files together in a single DAG walk
        fctxs = [c[f].introfilectx() for f in files]
        a = dagop.filectxancestors(fctxs, followfirst)
        s = set(c.rev() for c in a)
    else:
        s = dagop.revancestors(repo, baseset([c.rev()]), followfirst)

    return subset & s
939 937
@predicate('follow([pattern[, startrev]])', safe=True)
def follow(repo, subset, x):
    """
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If pattern is specified, the histories of files matching given
    pattern in the revision given by startrev are followed, including copies.
    """
    # shared implementation with _followfirst()
    return _follow(repo, subset, x, 'follow')
948 946
@predicate('_followfirst', safe=True)
def _followfirst(repo, subset, x):
    # ``followfirst([pattern[, startrev]])``
    # Like ``follow([pattern[, startrev]])`` but follows only the first parent
    # of every revisions or files revisions.
    # (no docstring on purpose: internal predicate, hidden from help)
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
955 953
@predicate('followlines(file, fromline:toline[, startrev=., descend=False])',
           safe=True)
def followlines(repo, subset, x):
    """Changesets modifying `file` in line range ('fromline', 'toline').

    Line range corresponds to 'file' content at 'startrev' and should hence be
    consistent with file size. If startrev is not specified, working directory's
    parent is used.

    By default, ancestors of 'startrev' are returned. If 'descend' is True,
    descendants of 'startrev' are returned though renames are (currently) not
    followed in this direction.
    """
    args = getargsdict(x, 'followlines', 'file *lines startrev descend')
    if len(args['lines']) != 1:
        raise error.ParseError(_("followlines requires a line range"))

    rev = '.'
    if 'startrev' in args:
        revs = getset(repo, fullreposet(repo), args['startrev'])
        if len(revs) != 1:
            raise error.ParseError(
                # i18n: "followlines" is a keyword
                _("followlines expects exactly one revision"))
        rev = revs.last()

    pat = getstring(args['file'], _("followlines requires a pattern"))
    # i18n: "followlines" is a keyword
    msg = _("followlines expects exactly one file")
    # resolve the pattern to a single canonical file name
    fname = scmutil.parsefollowlinespattern(repo, rev, pat, msg)
    # i18n: "followlines" is a keyword
    lr = getrange(args['lines'][0], _("followlines expects a line range"))
    fromline, toline = [getinteger(a, _("line range bounds must be integers"))
                        for a in lr]
    # validates the bounds and converts to 0-based/half-open internally
    fromline, toline = util.processlinerange(fromline, toline)

    fctx = repo[rev].filectx(fname)
    descend = False
    if 'descend' in args:
        descend = getboolean(args['descend'],
                             # i18n: "descend" is a keyword
                             _("descend argument must be a boolean"))
    if descend:
        rs = generatorset(
            (c.rev() for c, _linerange
             in dagop.blockdescendants(fctx, fromline, toline)),
            iterasc=True)
    else:
        rs = generatorset(
            (c.rev() for c, _linerange
             in dagop.blockancestors(fctx, fromline, toline)),
            iterasc=False)
    return subset & rs
1009 1007
@predicate('all()', safe=True)
def getall(repo, subset, x):
    """All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    return subset & spanset(repo) # drop "null" if any
1017 1015
@predicate('grep(regex)', weight=10)
def grep(repo, subset, x):
    """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        gr = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(x):
        c = repo[x]
        # search files, user and description for the pattern
        return any(gr.search(e)
                   for e in c.files() + [c.user(), c.description()])

    return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
1038 1036
@predicate('_matchfiles', safe=True)
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        s = getstring(arg, "_matchfiles requires string arguments")
        # each argument carries a two-character routing prefix
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'revision')
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'default mode')
            default = value
        else:
            raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    # This directly read the changelog data as creating changectx for all
    # revisions is quite expensive.
    getfiles = repo.changelog.readfiles
    wdirrev = node.wdirrev
    def matches(x):
        if x == wdirrev:
            # the working directory has no changelog entry to read from
            files = repo[x].files()
        else:
            files = getfiles(x)
        for f in files:
            if m(f):
                return True
        return False

    return subset.filter(matches,
                         condrepr=('<matchfiles patterns=%r, include=%r '
                                   'exclude=%r, default=%r, rev=%r>',
                                   pats, inc, exc, default, rev))
1102 1100
@predicate('file(pattern)', safe=True, weight=10)
def hasfile(repo, subset, x):
    """Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pattern = getstring(x, _("file requires a pattern"))
    # delegate to _matchfiles with a single plain-pattern argument
    return _matchfiles(repo, subset, ('string', 'p:' + pattern))
1115 1113
@predicate('head()', safe=True)
def head(repo, subset, x):
    """Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    cl = repo.changelog
    hs = set()
    # collect the head nodes of every named branch
    for ls in repo.branchmap().itervalues():
        for h in ls:
            hs.add(cl.rev(h))
    return subset & baseset(hs)
1127 1125
@predicate('heads(set)', safe=True)
def heads(repo, subset, x):
    """Members of set with no children in set.
    """
    # a head of the set is any member that is not a parent of another member
    s = getset(repo, subset, x)
    ps = parents(repo, subset, x)
    return s - ps
1135 1133
@predicate('hidden()', safe=True)
def hidden(repo, subset, x):
    """Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    # the revisions filtered out of the 'visible' view are the hidden ones
    hiddenrevs = repoview.filterrevs(repo, 'visible')
    return subset & hiddenrevs
1144 1142
@predicate('keyword(string)', safe=True, weight=10)
def keyword(repo, subset, x):
    """Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.

    For a regular expression or case sensitive search of these fields, use
    ``grep(regex)``.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        c = repo[r]
        # case-insensitive substring search over files, user and description
        for t in c.files() + [c.user(), c.description()]:
            if kw in encoding.lower(t):
                return True
        return False

    return subset.filter(matches, condrepr=('<keyword %r>', kw))
1162 1160
@predicate('limit(set[, n[, offset]])', safe=True, takeorder=True, weight=0)
def limit(repo, subset, x, order):
    """First n members of set, defaulting to 1, starting from offset.
    """
    args = getargsdict(x, 'limit', 'set n offset')
    if 'set' not in args:
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit requires one to three arguments"))
    # i18n: "limit" is a keyword
    lim = getinteger(args.get('n'), _("limit expects a number"), default=1)
    if lim < 0:
        raise error.ParseError(_("negative number to select"))
    # i18n: "limit" is a keyword
    ofs = getinteger(args.get('offset'), _("limit expects a number"), default=0)
    if ofs < 0:
        raise error.ParseError(_("negative offset"))
    os = getset(repo, fullreposet(repo), args['set'])
    # take [ofs, ofs+lim) in the inner set's own order
    ls = os.slice(ofs, ofs + lim)
    if order == followorder and lim > 1:
        # follow the order of 'subset'; intersection order comes from the
        # left operand
        return subset & ls
    return ls & subset
1184 1182
@predicate('last(set, [n])', safe=True, takeorder=True)
def last(repo, subset, x, order):
    """Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    lim = 1
    if len(l) == 2:
        # i18n: "last" is a keyword
        lim = getinteger(l[1], _("last expects a number"))
    if lim < 0:
        raise error.ParseError(_("negative number to select"))
    os = getset(repo, fullreposet(repo), l[0])
    # reverse so that taking the first 'lim' elements yields the last ones
    os.reverse()
    ls = os.slice(0, lim)
    if order == followorder and lim > 1:
        return subset & ls
    # undo the reversal so the result keeps the original direction
    ls.reverse()
    return ls & subset
1204 1202
@predicate('max(set)', safe=True)
def maxrev(repo, subset, x):
    """Changeset with highest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.max()
    except ValueError:
        # os.max() throws a ValueError when the collection is empty.
        # Same as python's max().
        pass
    else:
        if m in subset:
            return baseset([m], datarepr=('<max %r, %r>', subset, os))
    return baseset(datarepr=('<max %r, %r>', subset, os))
1219 1217
@predicate('merge()', safe=True)
def merge(repo, subset, x):
    """Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    parentrevs = repo.changelog.parentrevs
    # a merge is any revision whose second parent is not null (-1)
    return subset.filter(lambda r: parentrevs(r)[1] != -1,
                         condrepr='<merge>')
1229 1227
@predicate('branchpoint()', safe=True)
def branchpoint(repo, subset, x):
    """Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    # child counter for each rev >= baserev, indexed by (rev - baserev)
    parentscount = [0]*(len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                # count r as a child of parent p
                parentscount[p - baserev] += 1
    return subset.filter(lambda r: parentscount[r - baserev] > 1,
                         condrepr='<branchpoint>')
1249 1247
@predicate('min(set)', safe=True)
def minrev(repo, subset, x):
    """Changeset with lowest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.min()
    except ValueError:
        # os.min() throws a ValueError when the collection is empty.
        # Same as python's min().
        pass
    else:
        if m in subset:
            return baseset([m], datarepr=('<min %r, %r>', subset, os))
    return baseset(datarepr=('<min %r, %r>', subset, os))
1264 1262
@predicate('modifies(pattern)', safe=True, weight=30)
def modifies(repo, subset, x):
    """Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pattern = getstring(x, _("modifies requires a pattern"))
    # status field index 0 — the "modified" list (cf. removes() using 2)
    return checkstatus(repo, subset, pattern, 0)
1276 1274
@predicate('named(namespace)')
def named(repo, subset, x):
    """The changesets in a given namespace.

    Pattern matching is supported for `namespace`. See
    :hg:`help revisions.patterns`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = util.stringmatcher(ns)
    # first resolve which namespace objects to consult
    namespaces = set()
    if kind == 'literal':
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        # NOTE: 'ns' is rebound here from the query string to each
        # namespace object
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    # then collect every non-deprecated name's revisions from each namespace
    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    names -= {node.nullrev}
    return subset & names
1313 1311
@predicate('id(string)', safe=True)
def node_(repo, subset, x):
    """Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    if len(n) == 40:
        # full 40-character hex: exact node lookup
        try:
            rn = repo.changelog.rev(node.bin(n))
        except error.WdirUnsupported:
            rn = node.wdirrev
        except (LookupError, TypeError):
            # unknown node, or not valid hex
            rn = None
    else:
        # shorter string: unambiguous-prefix lookup
        rn = None
        try:
            pm = repo.changelog._partialmatch(n)
            if pm is not None:
                rn = repo.changelog.rev(pm)
        except error.WdirUnsupported:
            rn = node.wdirrev

    if rn is None:
        return baseset()
    result = baseset([rn])
    return result & subset
1342 1340
@predicate('obsolete()', safe=True)
def obsolete(repo, subset, x):
    """Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    return subset & obsmod.getrevs(repo, 'obsolete')
1350 1348
@predicate('only(set, [set])', safe=True)
def only(repo, subset, x):
    """Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        # single-arg form: exclude every repo head that is neither in
        # 'include' nor descended from it
        descendants = set(dagop.revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if not rev in descendants and not rev in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    # ::include - ::exclude
    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & results
1376 1374
@predicate('origin([set])', safe=True)
def origin(repo, subset, x):
    """
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, fullreposet(repo), x)
    else:
        dests = fullreposet(repo)

    def _firstsrc(rev):
        # walk the source chain back to its origin
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        while True:
            prev = _getrevsource(repo, src)

            if prev is None:
                # src has no recorded source itself: it is the first source
                return src
            src = prev

    o = {_firstsrc(r) for r in dests}
    # drop entries for revisions that have no recorded source
    o -= {None}
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & o
1408 1406
@predicate('outgoing([path])', safe=False, weight=10)
def outgoing(repo, subset, x):
    """Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    # fall back to the configured default-push/default path
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # suppress discovery chatter on the ui
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = {cl.rev(r) for r in outgoing.missing}
    return subset & o
1435 1433
@predicate('p1([set])', safe=True)
def p1(repo, subset, x):
    """First parent of changesets in set, or the working directory.
    """
    if x is None:
        # no argument: first parent of the working directory
        prev = repo[x].p1().rev()
        if prev >= 0:
            return subset & baseset([prev])
        return baseset()

    ps = set()
    parentrevs = repo.changelog.parentrevs
    for r in getset(repo, fullreposet(repo), x):
        try:
            ps.add(parentrevs(r)[0])
        except error.WdirUnsupported:
            # wdir() has no changelog entry; ask the context instead
            ps.add(repo[r].parents()[0].rev())
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & ps
1457 1455
@predicate('p2([set])', safe=True)
def p2(repo, subset, x):
    """Second parent of changesets in set, or the working directory.
    """
    if x is None:
        # no argument: second parent of the working directory, if any
        ps = repo[x].parents()
        try:
            p = ps[1].rev()
            if p >= 0:
                return subset & baseset([p])
            return baseset()
        except IndexError:
            # working directory has a single parent
            return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        try:
            ps.add(cl.parentrevs(r)[1])
        except error.WdirUnsupported:
            parents = repo[r].parents()
            if len(parents) == 2:
                # BUG FIX: add the revision number, not the changectx
                # object, matching the cl.parentrevs() path above and the
                # equivalent branch in parentspec()
                ps.add(parents[1].rev())
    ps -= {node.nullrev}
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & ps
1485 1483
def parentpost(repo, subset, x, order):
    """Handle the 'set^' postfix operator by delegating to p1().

    'order' is accepted for the operator-handler signature but unused.
    """
    return p1(repo, subset, x)
1488 1486
@predicate('parents([set])', safe=True)
def parents(repo, subset, x):
    """
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        ps = set(p.rev() for p in repo[x].parents())
    else:
        ps = set()
        cl = repo.changelog
        # bind hot lookups to locals for the loop
        up = ps.update
        parentrevs = cl.parentrevs
        for r in getset(repo, fullreposet(repo), x):
            try:
                up(parentrevs(r))
            except error.WdirUnsupported:
                # wdir() has no changelog entry; ask the context instead
                up(p.rev() for p in repo[r].parents())
    ps -= {node.nullrev}
    return subset & ps
1508 1506
def _phase(repo, subset, *targets):
    """helper to select all rev in <targets> phases"""
    return subset & repo._phasecache.getrevset(repo, targets)
1513 1511
@predicate('draft()', safe=True)
def draft(repo, subset, x):
    """Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    return _phase(repo, subset, phases.draft)
1521 1519
@predicate('secret()', safe=True)
def secret(repo, subset, x):
    """Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    return _phase(repo, subset, phases.secret)
1529 1527
def parentspec(repo, subset, x, n, order):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        # n arrives as a parsed token; its value is at index 1
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            # ^0 is the revision itself
            ps.add(r)
        elif n == 1:
            try:
                ps.add(cl.parentrevs(r)[0])
            except error.WdirUnsupported:
                # wdir() has no changelog entry; ask the context instead
                ps.add(repo[r].parents()[0].rev())
        else:
            try:
                parents = cl.parentrevs(r)
                # only keep an actual (non-null) second parent
                if parents[1] != node.nullrev:
                    ps.add(parents[1])
            except error.WdirUnsupported:
                parents = repo[r].parents()
                if len(parents) == 2:
                    ps.add(parents[1].rev())
    return subset & ps
1562 1560
@predicate('present(set)', safe=True, takeorder=True)
def present(repo, subset, x, order):
    """An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x, order)
    except error.RepoLookupError:
        # swallow the lookup failure instead of aborting the whole query
        return baseset()
1576 1574
1577 1575 # for internal use
1578 1576 @predicate('_notpublic', safe=True)
1579 1577 def _notpublic(repo, subset, x):
1580 1578 getargs(x, 0, 0, "_notpublic takes no arguments")
1581 1579 return _phase(repo, subset, phases.draft, phases.secret)
1582 1580
1583 1581 # for internal use
1584 1582 @predicate('_phaseandancestors(phasename, set)', safe=True)
1585 1583 def _phaseandancestors(repo, subset, x):
1586 1584 # equivalent to (phasename() & ancestors(set)) but more efficient
1587 1585 # phasename could be one of 'draft', 'secret', or '_notpublic'
1588 1586 args = getargs(x, 2, 2, "_phaseandancestors requires two arguments")
1589 1587 phasename = getsymbol(args[0])
1590 1588 s = getset(repo, fullreposet(repo), args[1])
1591 1589
1592 1590 draft = phases.draft
1593 1591 secret = phases.secret
1594 1592 phasenamemap = {
1595 1593 '_notpublic': draft,
1596 1594 'draft': draft, # follow secret's ancestors
1597 1595 'secret': secret,
1598 1596 }
1599 1597 if phasename not in phasenamemap:
1600 1598 raise error.ParseError('%r is not a valid phasename' % phasename)
1601 1599
1602 1600 minimalphase = phasenamemap[phasename]
1603 1601 getphase = repo._phasecache.phase
1604 1602
1605 1603 def cutfunc(rev):
1606 1604 return getphase(repo, rev) < minimalphase
1607 1605
1608 1606 revs = dagop.revancestors(repo, s, cutfunc=cutfunc)
1609 1607
1610 1608 if phasename == 'draft': # need to remove secret changesets
1611 1609 revs = revs.filter(lambda r: getphase(repo, r) == draft)
1612 1610 return subset & revs
1613 1611
@predicate('public()', safe=True)
def public(repo, subset, x):
    """Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    getphase = repo._phasecache.phase
    target = phases.public
    # lazily test each revision's phase; result is not cached
    return subset.filter(lambda r: getphase(repo, r) == target,
                         condrepr=('<phase %r>', target),
                         cache=False)
1624 1622
@predicate('remote([id [,path]])', safe=False)
def remote(repo, subset, x):
    """Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        # '.' means the current local branch name
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # resolve the identifier on the remote, then map back locally
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
1659 1657
@predicate('removes(pattern)', safe=True, weight=30)
def removes(repo, subset, x):
    """Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pattern = getstring(x, _("removes requires a pattern"))
    # status field index 2 — the "removed" list (cf. modifies() using 0)
    return checkstatus(repo, subset, pattern, 2)
1671 1669
@predicate('rev(number)', safe=True)
def rev(repo, subset, x):
    """Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    args = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        num = int(getstring(args[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    # null and wdir are valid even though they are not in the changelog
    if num not in repo.changelog and num not in (node.nullrev, node.wdirrev):
        return baseset()
    return subset & baseset([num])
1687 1685
@predicate('matching(revision [, field])', safe=True)
def matching(repo, subset, x):
    """Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                # normalize the 'author' alias to 'user'
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
                  'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True),)
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        # x matches if ALL selected fields equal those of SOME rev in revs
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
            if match:
                return True
        return False

    return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1799 1797
@predicate('reverse(set)', safe=True, takeorder=True, weight=0)
def reverse(repo, subset, x, order):
    """Reverse order of set.
    """
    revs = getset(repo, subset, x, order)
    # only flip when we are the one defining the order
    if order == defineorder:
        revs.reverse()
    return revs
1808 1806
@predicate('roots(set)', safe=True)
def roots(repo, subset, x):
    """Changesets in set with no parent changeset in set.
    """
    s = getset(repo, fullreposet(repo), x)
    parentrevs = repo.changelog.parentrevs
    def isroot(r):
        # a root has no non-null parent inside the set
        return not any(0 <= p and p in s for p in parentrevs(r))
    return subset & s.filter(isroot, condrepr='<roots>')
1821 1819
# Map from sort-key name to a changectx key-extraction function.
# 'author' is an alias for 'user'; the 'topo' key is handled separately
# in sort() and has no entry here.
_sortkeyfuncs = {
    'rev': lambda c: c.rev(),
    'branch': lambda c: c.branch(),
    'desc': lambda c: c.description(),
    'user': lambda c: c.user(),
    'author': lambda c: c.user(),
    'date': lambda c: c.date()[0],
}
1830 1828
def _getsortargs(x):
    """Parse sort options into (set, [(key, reverse)], opts)"""
    args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
    if 'set' not in args:
        # i18n: "sort" is a keyword
        raise error.ParseError(_('sort requires one or two arguments'))
    keys = "rev"
    if 'keys' in args:
        # i18n: "sort" is a keyword
        keys = getstring(args['keys'], _("sort spec must be a string"))

    keyflags = []
    for k in keys.split():
        fk = k
        # a leading '-' means descending order for that key
        reverse = (k[0] == '-')
        if reverse:
            k = k[1:]
        if k not in _sortkeyfuncs and k != 'topo':
            raise error.ParseError(_("unknown sort key %r") % fk)
        keyflags.append((k, reverse))

    if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
        # i18n: "topo" is a keyword
        raise error.ParseError(_('topo sort order cannot be combined '
                                 'with other sort keys'))

    opts = {}
    if 'topo.firstbranch' in args:
        # topo.firstbranch is only meaningful together with the topo key
        if any(k == 'topo' for k, reverse in keyflags):
            opts['topo.firstbranch'] = args['topo.firstbranch']
        else:
            # i18n: "topo" and "topo.firstbranch" are keywords
            raise error.ParseError(_('topo.firstbranch can only be used '
                                     'when using the topo sort key'))

    return args['set'], keyflags, opts
1867 1865
@predicate('sort(set[, [-]key... [, ...]])', safe=True, takeorder=True,
           weight=10)
def sort(repo, subset, x, order):
    """Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    - ``topo`` for a reverse topographical sort

    The ``topo`` sort order cannot be combined with other sort keys. This sort
    takes one optional argument, ``topo.firstbranch``, which takes a revset that
    specifies what topographical branches to prioritize in the sort.

    """
    s, keyflags, opts = _getsortargs(x)
    revs = getset(repo, subset, s, order)

    # nothing to do unless we both have keys and own the ordering
    if not keyflags or order != defineorder:
        return revs
    if len(keyflags) == 1 and keyflags[0][0] == "rev":
        # fast path: sorting by revision number only
        revs.sort(reverse=keyflags[0][1])
        return revs
    elif keyflags[0][0] == "topo":
        firstbranch = ()
        if 'topo.firstbranch' in opts:
            firstbranch = getset(repo, subset, opts['topo.firstbranch'])
        revs = baseset(dagop.toposort(revs, repo.changelog.parentrevs,
                                      firstbranch),
                       istopo=True)
        if keyflags[0][1]:
            revs.reverse()
        return revs

    # sort() is guaranteed to be stable
    # apply keys from least to most significant; stability composes them
    ctxs = [repo[r] for r in revs]
    for k, reverse in reversed(keyflags):
        ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
    return baseset([c.rev() for c in ctxs])
1912 1910
@predicate('subrepo([pattern])')
def subrepo(repo, subset, x):
    """Changesets that add, modify or remove the given subrepo. If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    pat = None
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    # subrepo state changes always go through .hgsubstate
    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        k, p, m = util.stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        # status of .hgsubstate against the first parent
        s = repo.status(c.p1().node(), c.node(), match=m)

        if pat is None:
            # any subrepo change counts
            return s.added or s.modified or s.removed

        if s.added:
            return any(submatches(c.substate.keys()))

        if s.modified:
            # compare substate entries of parent and child for matching names
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches, condrepr=('<subrepo %r>', pat))
1956 1954
def _mapbynodefunc(repo, s, f):
    """(repo, smartset, [node] -> [node]) -> smartset

    Helper method to map a smartset to another smartset given a function only
    talking about nodes. Handles converting between rev numbers and nodes, and
    filtering.
    """
    # use the unfiltered changelog so f may return hidden nodes; filtered
    # revisions are stripped from the result below
    cl = repo.unfiltered().changelog
    torev = cl.rev
    tonode = cl.node
    nodemap = cl.nodemap
    # drop any node f returned that is unknown to the repo
    result = set(torev(n) for n in f(tonode(r) for r in s) if n in nodemap)
    return smartset.baseset(result - repo.changelog.filteredrevs)
1970 1968
@predicate('successors(set)', safe=True)
def successors(repo, subset, x):
    """All successors for set, including the given set themselves"""
    s = getset(repo, fullreposet(repo), x)

    def allsucc(nodes):
        # obsutil walks the obsstore transitively, keeping the input nodes
        return obsutil.allsuccessors(repo.obsstore, nodes)

    return subset & _mapbynodefunc(repo, s, allsucc)
1978 1976
def _substringmatcher(pattern, casesensitive=True):
    """Build a (kind, pattern, matcher) triple where a 'literal' pattern
    means substring containment rather than exact equality."""
    kind, pattern, matcher = util.stringmatcher(pattern,
                                                casesensitive=casesensitive)
    if kind == 'literal':
        if casesensitive:
            matcher = lambda s: pattern in s
        else:
            # normalize both sides once so the containment test is
            # case-insensitive
            pattern = encoding.lower(pattern)
            matcher = lambda s: pattern in encoding.lower(s)
    return kind, pattern, matcher
1989 1987
@predicate('tag([name])', safe=True)
def tag(repo, subset, x):
    """The specified tag by name, or all tagged revisions if no name is given.

    Pattern matching is supported for `name`. See
    :hg:`help revisions.patterns`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if args:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = util.stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                # a literal tag name that does not exist is a user error
                raise error.RepoLookupError(_("tag '%s' does not exist")
                                            % pattern)
            s = {repo[tn].rev()}
        else:
            # non-literal pattern: scan the full tag list
            s = {cl.rev(n) for t, n in repo.tagslist() if matcher(t)}
    else:
        # no argument: every tagged revision except the implicit 'tip' tag
        s = {cl.rev(n) for t, n in repo.tagslist() if t != 'tip'}
    return subset & s
2017 2015
@predicate('tagged', safe=True)
def tagged(repo, subset, x):
    # backwards-compatible alias for tag()
    return tag(repo, subset, x)
2021 2019
@predicate('unstable()', safe=True)
def unstable(repo, subset, x):
    # deprecated spelling of orphan(); warn once per invocation, then delegate
    repo.ui.deprecwarn("'unstable()' is deprecated, "
                       "use 'orphan()'", '4.4')
    return orphan(repo, subset, x)
2029 2027
@predicate('orphan()', safe=True)
def orphan(repo, subset, x):
    """Non-obsolete changesets with obsolete ancestors. (EXPERIMENTAL)
    """
    # i18n: "orphan" is a keyword
    getargs(x, 0, 0, _("orphan takes no arguments"))
    # the obsolescence machinery precomputes the orphan revision set
    return subset & obsmod.getrevs(repo, 'orphan')
2038 2036
@predicate('user(string)', safe=True, weight=10)
def user(repo, subset, x):
    """User name contains string. The match is case-insensitive.

    Pattern matching is supported for `string`. See
    :hg:`help revisions.patterns`.
    """
    # user() and author() are synonyms; share one implementation
    return author(repo, subset, x)
2048 2046
@predicate('wdir()', safe=True, weight=0)
def wdir(repo, subset, x):
    """Working directory. (EXPERIMENTAL)"""
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    # the virtual wdir revision is only visible in a full repo set or when
    # the subset explicitly contains it
    if isinstance(subset, fullreposet) or node.wdirrev in subset:
        return baseset([node.wdirrev])
    return baseset()
2057 2055
def _orderedlist(repo, subset, x):
    """Evaluate a '\\0'-separated list of revision symbols, preserving the
    list order and dropping duplicates."""
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    ls = []
    seen = set()
    for t in s.split('\0'):
        try:
            # fast path for integer revision
            r = int(t)
            # reject strings that merely parse as ints (e.g. '010', '+5')
            # and ints outside the changelog
            if str(r) != t or r not in cl:
                raise ValueError
            revs = [r]
        except ValueError:
            # general symbol resolution (bookmark, tag, hash prefix, ...)
            revs = stringset(repo, subset, t, defineorder)

        for r in revs:
            if r in seen:
                continue
            # nullrev is admitted only against a full repo set
            if (r in subset
                or r == node.nullrev and isinstance(subset, fullreposet)):
                ls.append(r)
                seen.add(r)
    return baseset(ls)
2085 2083
# for internal use
@predicate('_list', safe=True, takeorder=True)
def _list(repo, subset, x, order):
    if order != followorder:
        return _orderedlist(repo, subset, x)
    # slow path to take the subset order
    return subset & _orderedlist(repo, fullreposet(repo), x)
2094 2092
def _orderedintlist(repo, subset, x):
    """Evaluate a '\\0'-separated list of integer revisions in list order."""
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    wanted = [int(r) for r in s.split('\0')]
    return baseset([r for r in wanted if r in subset])
2102 2100
# for internal use
@predicate('_intlist', safe=True, takeorder=True, weight=0)
def _intlist(repo, subset, x, order):
    if order != followorder:
        return _orderedintlist(repo, subset, x)
    # slow path to take the subset order
    return subset & _orderedintlist(repo, fullreposet(repo), x)
2111 2109
def _orderedhexlist(repo, subset, x):
    """Evaluate a '\\0'-separated list of hex node ids in list order."""
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    torev = repo.changelog.rev
    revs = [torev(node.bin(h)) for h in s.split('\0')]
    return baseset([r for r in revs if r in subset])
2120 2118
# for internal use
@predicate('_hexlist', safe=True, takeorder=True)
def _hexlist(repo, subset, x, order):
    if order != followorder:
        return _orderedhexlist(repo, subset, x)
    # slow path to take the subset order
    return subset & _orderedhexlist(repo, fullreposet(repo), x)
2129 2127
# dispatch table: parse-tree node type -> evaluation function used by getset()
methods = {
    "range": rangeset,
    "rangeall": rangeall,
    "rangepre": rangepre,
    "rangepost": rangepost,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "andsmally": andsmallyset,
    "or": orset,
    "not": notset,
    "difference": differenceset,
    "relation": relationset,
    "relsubscript": relsubscriptset,
    "subscript": subscriptset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": parentpost,
}
2153 2151
def posttreebuilthook(tree, repo):
    """Extension hook point invoked after the parse tree is fully optimized.

    The default implementation does nothing; extensions wrap this to inspect
    or act on the final tree before a matcher is built.
    """
    # hook for extensions to execute code on the optimized tree
    pass
2157 2155
def match(ui, spec, repo=None):
    """Create a matcher for a single revision spec

    Thin convenience wrapper around matchany() for the one-spec case.
    """
    return matchany(ui, [spec], repo=repo)
2161 2159
def matchany(ui, specs, repo=None, localalias=None):
    """Create a matcher that will include any revisions matching one of the
    given specs

    If localalias is not None, it is a dict {name: definitionstring}. It takes
    precedence over [revsetalias] config section.
    """
    if not specs:
        # no specs at all: matcher that yields the empty set
        def mfunc(repo, subset=None):
            return baseset()
        return mfunc
    if not all(specs):
        raise error.ParseError(_("empty query"))
    lookup = None
    if repo:
        # lets the parser disambiguate symbols against actual repo contents
        lookup = repo.__contains__
    if len(specs) == 1:
        tree = revsetlang.parse(specs[0], lookup)
    else:
        # several specs combine as a single 'or' of their parse trees
        tree = ('or',
                ('list',) + tuple(revsetlang.parse(s, lookup) for s in specs))

    aliases = []
    warn = None
    if ui:
        aliases.extend(ui.configitems('revsetalias'))
        warn = ui.warn
    if localalias:
        # appended last so local aliases override [revsetalias] entries
        aliases.extend(localalias.items())
    if aliases:
        tree = revsetlang.expandaliases(tree, aliases, warn=warn)
    # pipeline order matters: fold string concatenation, annotate ordering
    # requirements, then optimize the tree
    tree = revsetlang.foldconcat(tree)
    tree = revsetlang.analyze(tree)
    tree = revsetlang.optimize(tree)
    posttreebuilthook(tree, repo)
    return makematcher(tree)
2198 2196
def makematcher(tree):
    """Create a matcher from an evaluatable tree"""
    def mfunc(repo, subset=None, order=None):
        if order is None:
            # bare 'x' defines its own order; 'subset & x' follows the subset
            order = defineorder if subset is None else followorder
        if subset is None:
            subset = fullreposet(repo)
        return getset(repo, subset, tree, order)
    return mfunc
2211 2209
def loadpredicate(ui, extname, registrarobj):
    """Load revset predicates from specified registrarobj
    """
    table = registrarobj._table
    for name, func in table.iteritems():
        symbols[name] = func
        if func._safe:
            # predicates marked safe may run on untrusted input (e.g. hgweb)
            safesymbols.add(name)
2219 2217
# load built-in predicates explicitly to setup safesymbols
loadpredicate(None, None, predicate)

# tell hggettext to extract docstrings from these functions:
i18nfunctions = symbols.values()
General Comments 0
You need to be logged in to leave comments. Login now