##// END OF EJS Templates
revset: move groupbranchiter over from graphmod...
Martijn Pieters -
r29347:98535ad4 default
parent child Browse files
Show More
@@ -1,682 +1,482 b''
1 # Revision graph generator for Mercurial
1 # Revision graph generator for Mercurial
2 #
2 #
3 # Copyright 2008 Dirkjan Ochtman <dirkjan@ochtman.nl>
3 # Copyright 2008 Dirkjan Ochtman <dirkjan@ochtman.nl>
4 # Copyright 2007 Joel Rosdahl <joel@rosdahl.net>
4 # Copyright 2007 Joel Rosdahl <joel@rosdahl.net>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 """supports walking the history as DAGs suitable for graphical output
9 """supports walking the history as DAGs suitable for graphical output
10
10
11 The most basic format we use is that of::
11 The most basic format we use is that of::
12
12
13 (id, type, data, [parentids])
13 (id, type, data, [parentids])
14
14
15 The node and parent ids are arbitrary integers which identify a node in the
15 The node and parent ids are arbitrary integers which identify a node in the
16 context of the graph returned. Type is a constant specifying the node type.
16 context of the graph returned. Type is a constant specifying the node type.
17 Data depends on type.
17 Data depends on type.
18 """
18 """
19
19
20 from __future__ import absolute_import
20 from __future__ import absolute_import
21
21
22 import heapq
23
24 from .node import nullrev
22 from .node import nullrev
25 from . import (
23 from . import (
26 revset,
24 revset,
27 util,
25 util,
28 )
26 )
29
27
# Node type identifiers used in the (id, type, data, [parentids]) tuples
# produced by the walkers below.
CHANGESET = 'C'
PARENT = 'P'
GRANDPARENT = 'G'
MISSINGPARENT = 'M'

# Style of line to draw. None signals a line that ends and is removed at this
# point. A number prefix means only the last N characters of the current block
# will use that style, the rest will use the PARENT style. Add a - sign
# (so making N negative) and all but the first N characters use that style.
EDGES = {PARENT: '|', GRANDPARENT: ':', MISSINGPARENT: None}
def groupbranchiter(revs, parentsfunc, firstbranch=()):
    """Yield revisions from heads to roots one (topo) branch at a time.

    This function aims to be used by a graph generator that wishes to minimize
    the number of parallel branches and their interleaving.

    Example iteration order (numbers show the "true" order in a changelog):

      o  4
      |
      o  1
      |
      | o  3
      | |
      | o  2
      |/
      o  0

    Note that the ancestors of merges are understood by the current
    algorithm to be on the same branch. This means no reordering will
    occur behind a merge.
    """
    # Overview: revisions are held back ("retained") in subgroups until the
    # whole subgroup can be emitted right before the revision it is blocked
    # on.  Each subgroup is a pair (revs, blocked):
    #   revs    -- ordered list of retained revisions
    #   blocked -- parents of revisions in 'revs' not themselves in 'revs'
    # groups[0] is special: its 'revs' list stays empty and its 'blocked'
    # set holds the parents of everything already emitted.  Pre-seeding it
    # (through 'firstbranch') forces that branch to be emitted first, making
    # every other branch wait.

    revs.sort(reverse=True)

    # Parents of already-emitted revisions; these no longer block anything.
    unblocked = set(firstbranch)

    groups = [([], unblocked)]
    pendingheap = []    # heap of negated revs awaiting processing
    pendingset = set()  # membership mirror of pendingheap

    heappop = heapq.heappop
    heappush = heapq.heappush
    for currentrev in revs:
        # The heap pops its smallest element; revs are stored negated so the
        # highest revision comes out first.
        if currentrev not in pendingset:
            heappush(pendingheap, -currentrev)
            pendingset.add(currentrev)
        # Drain pending revisions until 'currentrev' itself has been handled.
        rev = None
        while rev != currentrev:
            rev = -heappop(pendingheap)
            pendingset.remove(rev)

            # Find every subgroup waiting on this revision.
            matching = [i for i, g in enumerate(groups) if rev in g[1]]

            if matching:
                # Merge all subgroups blocked on the same revision into the
                # oldest one.  (The merge could happen earlier, when a shared
                # blocker is first noticed, but doing it here keeps the
                # algorithm simpler.)
                targetidx = matching.pop(0)
                trevs, tparents = groups[targetidx]
                for i in matching:
                    gr = groups[i]
                    trevs.extend(gr[0])
                    tparents |= gr[1]
                # Drop the merged-away subgroups, last index first, so the
                # remaining indices stay valid while deleting.
                for i in reversed(matching):
                    del groups[i]
            else:
                # A new head: open a fresh subgroup blocked on it.
                targetidx = len(groups)
                groups.append(([], {rev}))

            gr = groups[targetidx]

            # Record the revision in its subgroup (only members of the input
            # set are displayed) and replace it by its parents in the
            # subgroup's blocked set.  This happens after the merge above so
            # that retained revisions depending on 'rev' precede it.
            if rev == currentrev:  # only display stuff in rev
                gr[0].append(rev)
            gr[1].remove(rev)
            parents = [p for p in parentsfunc(rev) if p > nullrev]
            gr[1].update(parents)
            for p in parents:
                if p not in pendingset:
                    pendingset.add(p)
                    heappush(pendingheap, -p)

            # Decide whether a subgroup can be emitted now.
            #
            # If nothing is awaited ('unblocked' empty) we either just
            # started (no priority given) or just finished a disconnected
            # part of the graph: arbitrarily pick the oldest retained
            # subgroup.  Otherwise a subgroup is emitted only once it is
            # blocked on a revision we are about to display.
            if not unblocked:
                if len(groups) > 1:  # display other subset
                    targetidx = 1
                    gr = groups[1]
            elif not gr[1] & unblocked:
                gr = None

            if gr is not None:
                # The subgroup's blockers become "already seen" parents.
                unblocked |= gr[1]
                # Output every revision retained in the subgroup.
                for r in gr[0]:
                    yield r
                # Remove the emitted subgroup, except groups[0], which is
                # merely emptied since it anchors the emitted state.
                if targetidx:
                    del groups[targetidx]
                else:
                    gr[0][:] = []

    # Flush subgroups still waiting on revisions we never iterated over.
    for g in groups:
        for r in g[0]:
            yield r
def dagwalker(repo, revs):
    """cset DAG generator yielding (id, CHANGESET, ctx, [parentinfo]) tuples

    This generator function walks through revisions (which should be ordered
    from bigger to lower). It returns a tuple for each node.

    Each parentinfo entry is a tuple with (edgetype, parentid), where edgetype
    is one of PARENT, GRANDPARENT or MISSINGPARENT. The node and parent ids
    are arbitrary integers which identify a node in the context of the graph
    returned.
    """
    if not revs:
        return

    # cache of reachableroots() results, keyed by missing parent rev
    gpcache = {}

    if repo.ui.configbool('experimental', 'graph-group-branches', False):
        firstbranch = ()
        firstbranchrevset = repo.ui.config(
            'experimental', 'graph-group-branches.firstbranch', '')
        if firstbranchrevset:
            firstbranch = repo.revs(firstbranchrevset)
        parentrevs = repo.changelog.parentrevs
        revs = groupbranchiter(revs, parentrevs, firstbranch)
        revs = revset.baseset(revs)

    for rev in revs:
        ctx = repo[rev]
        # Partition parents into those inside the rev set and those missing
        # from it, then tag each with a marker telling the drawing code what
        # kind of edge to use.
        parentset = {p.rev() for p in ctx.parents() if p.rev() in revs}
        missing = [p.rev() for p in ctx.parents()
                   if p.rev() != nullrev and p.rev() not in parentset]
        parents = [(PARENT, p) for p in sorted(parentset)]

        for mpar in missing:
            gp = gpcache.get(mpar)
            if gp is None:
                # precompute slow query as we know reachableroots() goes
                # through all revs (issue4782)
                if not isinstance(revs, revset.baseset):
                    revs = revset.baseset(revs)
                gp = gpcache[mpar] = sorted(set(revset.reachableroots(
                    repo, revs, [mpar])))
            if not gp:
                # no ancestor of mpar is in the set: draw a dangling edge
                parents.append((MISSINGPARENT, mpar))
                parentset.add(mpar)
            else:
                # draw "grandparent" edges to the nearest in-set ancestors
                parents.extend((GRANDPARENT, g)
                               for g in gp if g not in parentset)
                parentset.update(gp)

        yield (ctx.rev(), CHANGESET, ctx, parents)
def nodes(repo, nodes):
    """cset DAG generator yielding (id, CHANGESET, ctx, [parentids]) tuples

    This generator function walks the given nodes. It only returns parents
    that are in nodes, too.
    """
    wanted = set(nodes)
    for node in nodes:
        ctx = repo[node]
        # keep only the parents that are themselves part of the walk
        parents = {(PARENT, p.rev()) for p in ctx.parents()
                   if p.node() in wanted}
        yield (ctx.rev(), CHANGESET, ctx, sorted(parents))
def colored(dag, repo):
    """annotates a DAG with colored edge information

    For each DAG node this function emits tuples::

      (id, type, data, (col, color), [(col, nextcol, color)])

    with the following new elements:

      - Tuple (col, color) with column and color index for the current node
      - A list of tuples indicating the edges between the current node and its
        parents.
    """
    seen = []
    colors = {}
    newcolor = 1
    config = {}

    # Per-branch width/color settings from the [graph] config section
    # (e.g. "graph.default.color"); invalid values are silently ignored.
    for key, val in repo.ui.configitems('graph'):
        if '.' not in key:
            continue
        branch, setting = key.rsplit('.', 1)
        # Validation
        if setting == "width" and val.isdigit():
            config.setdefault(branch, {})[setting] = int(val)
        elif setting == "color" and val.isalnum():
            config.setdefault(branch, {})[setting] = val

    if config:
        getconf = util.lrucachefunc(
            lambda rev: config.get(repo[rev].branch(), {}))
    else:
        getconf = lambda rev: {}

    for (cur, type, data, parents) in dag:

        # A revision not yet tracked starts a new head column with a fresh
        # color.
        if cur not in seen:
            seen.append(cur)  # new head
            colors[cur] = newcolor
            newcolor += 1

        col = seen.index(cur)
        color = colors.pop(cur)
        nextrow = seen[:]

        # Replace the current node by its not-yet-tracked parents in the
        # ongoing-edge list.
        addparents = [p for pt, p in parents if p not in nextrow]
        nextrow[col:col + 1] = addparents

        # The first parent inherits the node's color; the rest get new ones.
        for i, p in enumerate(addparents):
            if i == 0:
                colors[p] = color
            else:
                colors[p] = newcolor
                newcolor += 1

        # Build the edge list for this row.
        edges = []
        for ecol, eid in enumerate(seen):
            if eid in nextrow:
                # ongoing edge, drawn with its own color and branch config
                bconf = getconf(eid)
                edges.append((
                    ecol, nextrow.index(eid), colors[eid],
                    bconf.get('width', -1),
                    bconf.get('color', '')))
            elif eid == cur:
                # edges from the current node to each of its parents
                for ptype, p in parents:
                    bconf = getconf(p)
                    edges.append((
                        ecol, nextrow.index(p), color,
                        bconf.get('width', -1),
                        bconf.get('color', '')))

        # Yield and move on
        yield (cur, type, data, (col, color), edges)
        seen = nextrow
def asciiedges(type, char, lines, state, rev, parents):
    """adds edge info to changelog DAG walk suitable for ascii()"""
    seen = state['seen']
    if rev not in seen:
        seen.append(rev)
    nodeidx = seen.index(rev)

    # Split parents into those already tracked as columns and new ones;
    # remember the edge style (from 'styles') for each new parent edge.
    knownparents = []
    newparents = []
    for etype, p in parents:
        if p in seen:
            knownparents.append(p)
        else:
            newparents.append(p)
            state['edges'][p] = state['styles'].get(etype, '|')

    ncols = len(seen)
    nextseen = seen[:]
    nextseen[nodeidx:nodeidx + 1] = newparents
    edges = [(nodeidx, nextseen.index(p))
             for p in knownparents if p != nullrev]

    seen[:] = nextseen
    while len(newparents) > 2:
        # ascii() only knows how to add or remove a single column between two
        # calls. Nodes with more than two parents break this constraint, so
        # intermediate expansion rows are emitted to grow the active node
        # list one column at a time.
        edges.append((nodeidx, nodeidx))
        edges.append((nodeidx, nodeidx + 1))
        nmorecols = 1
        yield (type, char, lines, (nodeidx, edges, ncols, nmorecols))
        char = '\\'
        lines = []
        nodeidx += 1
        ncols += 1
        edges = []
        del newparents[0]

    if newparents:
        edges.append((nodeidx, nodeidx))
    if len(newparents) > 1:
        edges.append((nodeidx, nodeidx + 1))
    nmorecols = len(nextseen) - ncols
    # remove current node from edge characters, no longer needed
    state['edges'].pop(rev, None)
    yield (type, char, lines, (nodeidx, edges, ncols, nmorecols))
432 def _fixlongrightedges(edges):
232 def _fixlongrightedges(edges):
433 for (i, (start, end)) in enumerate(edges):
233 for (i, (start, end)) in enumerate(edges):
434 if end > start:
234 if end > start:
435 edges[i] = (start, end + 1)
235 edges[i] = (start, end + 1)
436
236
437 def _getnodelineedgestail(
237 def _getnodelineedgestail(
438 echars, idx, pidx, ncols, coldiff, pdiff, fix_tail):
238 echars, idx, pidx, ncols, coldiff, pdiff, fix_tail):
439 if fix_tail and coldiff == pdiff and coldiff != 0:
239 if fix_tail and coldiff == pdiff and coldiff != 0:
440 # Still going in the same non-vertical direction.
240 # Still going in the same non-vertical direction.
441 if coldiff == -1:
241 if coldiff == -1:
442 start = max(idx + 1, pidx)
242 start = max(idx + 1, pidx)
443 tail = echars[idx * 2:(start - 1) * 2]
243 tail = echars[idx * 2:(start - 1) * 2]
444 tail.extend(["/", " "] * (ncols - start))
244 tail.extend(["/", " "] * (ncols - start))
445 return tail
245 return tail
446 else:
246 else:
447 return ["\\", " "] * (ncols - idx - 1)
247 return ["\\", " "] * (ncols - idx - 1)
448 else:
248 else:
449 remainder = (ncols - idx - 1)
249 remainder = (ncols - idx - 1)
450 return echars[-(remainder * 2):] if remainder > 0 else []
250 return echars[-(remainder * 2):] if remainder > 0 else []
451
251
452 def _drawedges(echars, edges, nodeline, interline):
252 def _drawedges(echars, edges, nodeline, interline):
453 for (start, end) in edges:
253 for (start, end) in edges:
454 if start == end + 1:
254 if start == end + 1:
455 interline[2 * end + 1] = "/"
255 interline[2 * end + 1] = "/"
456 elif start == end - 1:
256 elif start == end - 1:
457 interline[2 * start + 1] = "\\"
257 interline[2 * start + 1] = "\\"
458 elif start == end:
258 elif start == end:
459 interline[2 * start] = echars[2 * start]
259 interline[2 * start] = echars[2 * start]
460 else:
260 else:
461 if 2 * end >= len(nodeline):
261 if 2 * end >= len(nodeline):
462 continue
262 continue
463 nodeline[2 * end] = "+"
263 nodeline[2 * end] = "+"
464 if start > end:
264 if start > end:
465 (start, end) = (end, start)
265 (start, end) = (end, start)
466 for i in range(2 * start + 1, 2 * end):
266 for i in range(2 * start + 1, 2 * end):
467 if nodeline[i] != "+":
267 if nodeline[i] != "+":
468 nodeline[i] = "-"
268 nodeline[i] = "-"
469
269
470 def _getpaddingline(echars, idx, ncols, edges):
270 def _getpaddingline(echars, idx, ncols, edges):
471 # all edges up to the current node
271 # all edges up to the current node
472 line = echars[:idx * 2]
272 line = echars[:idx * 2]
473 # an edge for the current node, if there is one
273 # an edge for the current node, if there is one
474 if (idx, idx - 1) in edges or (idx, idx) in edges:
274 if (idx, idx - 1) in edges or (idx, idx) in edges:
475 # (idx, idx - 1) (idx, idx)
275 # (idx, idx - 1) (idx, idx)
476 # | | | | | | | |
276 # | | | | | | | |
477 # +---o | | o---+
277 # +---o | | o---+
478 # | | X | | X | |
278 # | | X | | X | |
479 # | |/ / | |/ /
279 # | |/ / | |/ /
480 # | | | | | |
280 # | | | | | |
481 line.extend(echars[idx * 2:(idx + 1) * 2])
281 line.extend(echars[idx * 2:(idx + 1) * 2])
482 else:
282 else:
483 line.extend(' ')
283 line.extend(' ')
484 # all edges to the right of the current node
284 # all edges to the right of the current node
485 remainder = ncols - idx - 1
285 remainder = ncols - idx - 1
486 if remainder > 0:
286 if remainder > 0:
487 line.extend(echars[-(remainder * 2):])
287 line.extend(echars[-(remainder * 2):])
488 return line
288 return line
489
289
490 def _drawendinglines(lines, extra, edgemap, seen):
290 def _drawendinglines(lines, extra, edgemap, seen):
491 """Draw ending lines for missing parent edges
291 """Draw ending lines for missing parent edges
492
292
493 None indicates an edge that ends at between this node and the next
293 None indicates an edge that ends at between this node and the next
494 Replace with a short line ending in ~ and add / lines to any edges to
294 Replace with a short line ending in ~ and add / lines to any edges to
495 the right.
295 the right.
496
296
497 """
297 """
498 if None not in edgemap.values():
298 if None not in edgemap.values():
499 return
299 return
500
300
501 # Check for more edges to the right of our ending edges.
301 # Check for more edges to the right of our ending edges.
502 # We need enough space to draw adjustment lines for these.
302 # We need enough space to draw adjustment lines for these.
503 edgechars = extra[::2]
303 edgechars = extra[::2]
504 while edgechars and edgechars[-1] is None:
304 while edgechars and edgechars[-1] is None:
505 edgechars.pop()
305 edgechars.pop()
506 shift_size = max((edgechars.count(None) * 2) - 1, 0)
306 shift_size = max((edgechars.count(None) * 2) - 1, 0)
507 while len(lines) < 3 + shift_size:
307 while len(lines) < 3 + shift_size:
508 lines.append(extra[:])
308 lines.append(extra[:])
509
309
510 if shift_size:
310 if shift_size:
511 empties = []
311 empties = []
512 toshift = []
312 toshift = []
513 first_empty = extra.index(None)
313 first_empty = extra.index(None)
514 for i, c in enumerate(extra[first_empty::2], first_empty // 2):
314 for i, c in enumerate(extra[first_empty::2], first_empty // 2):
515 if c is None:
315 if c is None:
516 empties.append(i * 2)
316 empties.append(i * 2)
517 else:
317 else:
518 toshift.append(i * 2)
318 toshift.append(i * 2)
519 targets = list(range(first_empty, first_empty + len(toshift) * 2, 2))
319 targets = list(range(first_empty, first_empty + len(toshift) * 2, 2))
520 positions = toshift[:]
320 positions = toshift[:]
521 for line in lines[-shift_size:]:
321 for line in lines[-shift_size:]:
522 line[first_empty:] = [' '] * (len(line) - first_empty)
322 line[first_empty:] = [' '] * (len(line) - first_empty)
523 for i in range(len(positions)):
323 for i in range(len(positions)):
524 pos = positions[i] - 1
324 pos = positions[i] - 1
525 positions[i] = max(pos, targets[i])
325 positions[i] = max(pos, targets[i])
526 line[pos] = '/' if pos > targets[i] else extra[toshift[i]]
326 line[pos] = '/' if pos > targets[i] else extra[toshift[i]]
527
327
528 map = {1: '|', 2: '~'}
328 map = {1: '|', 2: '~'}
529 for i, line in enumerate(lines):
329 for i, line in enumerate(lines):
530 if None not in line:
330 if None not in line:
531 continue
331 continue
532 line[:] = [c or map.get(i, ' ') for c in line]
332 line[:] = [c or map.get(i, ' ') for c in line]
533
333
534 # remove edges that ended
334 # remove edges that ended
535 remove = [p for p, c in edgemap.items() if c is None]
335 remove = [p for p, c in edgemap.items() if c is None]
536 for parent in remove:
336 for parent in remove:
537 del edgemap[parent]
337 del edgemap[parent]
538 seen.remove(parent)
338 seen.remove(parent)
539
339
def asciistate():
    """returns the initial value for the "state" argument to ascii()"""
    state = dict(
        seen=[],             # ongoing edge columns, by rev
        edges={},            # edge character per pending parent
        lastcoldiff=0,       # column delta of the previous row
        lastindex=0,         # node column of the previous row
        styles=EDGES.copy(), # per-edge-type drawing characters
        graphshorten=False,  # collapse single-child rows when True
    )
    return state
551 def ascii(ui, state, type, char, text, coldata):
351 def ascii(ui, state, type, char, text, coldata):
552 """prints an ASCII graph of the DAG
352 """prints an ASCII graph of the DAG
553
353
554 takes the following arguments (one call per node in the graph):
354 takes the following arguments (one call per node in the graph):
555
355
556 - ui to write to
356 - ui to write to
557 - Somewhere to keep the needed state in (init to asciistate())
357 - Somewhere to keep the needed state in (init to asciistate())
558 - Column of the current node in the set of ongoing edges.
358 - Column of the current node in the set of ongoing edges.
559 - Type indicator of node data, usually 'C' for changesets.
359 - Type indicator of node data, usually 'C' for changesets.
560 - Payload: (char, lines):
360 - Payload: (char, lines):
561 - Character to use as node's symbol.
361 - Character to use as node's symbol.
562 - List of lines to display as the node's text.
362 - List of lines to display as the node's text.
563 - Edges; a list of (col, next_col) indicating the edges between
363 - Edges; a list of (col, next_col) indicating the edges between
564 the current node and its parents.
364 the current node and its parents.
565 - Number of columns (ongoing edges) in the current revision.
365 - Number of columns (ongoing edges) in the current revision.
566 - The difference between the number of columns (ongoing edges)
366 - The difference between the number of columns (ongoing edges)
567 in the next revision and the number of columns (ongoing edges)
367 in the next revision and the number of columns (ongoing edges)
568 in the current revision. That is: -1 means one column removed;
368 in the current revision. That is: -1 means one column removed;
569 0 means no columns added or removed; 1 means one column added.
369 0 means no columns added or removed; 1 means one column added.
570 """
370 """
571 idx, edges, ncols, coldiff = coldata
371 idx, edges, ncols, coldiff = coldata
572 assert -2 < coldiff < 2
372 assert -2 < coldiff < 2
573
373
574 edgemap, seen = state['edges'], state['seen']
374 edgemap, seen = state['edges'], state['seen']
575 # Be tolerant of history issues; make sure we have at least ncols + coldiff
375 # Be tolerant of history issues; make sure we have at least ncols + coldiff
576 # elements to work with. See test-glog.t for broken history test cases.
376 # elements to work with. See test-glog.t for broken history test cases.
577 echars = [c for p in seen for c in (edgemap.get(p, '|'), ' ')]
377 echars = [c for p in seen for c in (edgemap.get(p, '|'), ' ')]
578 echars.extend(('|', ' ') * max(ncols + coldiff - len(seen), 0))
378 echars.extend(('|', ' ') * max(ncols + coldiff - len(seen), 0))
579
379
580 if coldiff == -1:
380 if coldiff == -1:
581 # Transform
381 # Transform
582 #
382 #
583 # | | | | | |
383 # | | | | | |
584 # o | | into o---+
384 # o | | into o---+
585 # |X / |/ /
385 # |X / |/ /
586 # | | | |
386 # | | | |
587 _fixlongrightedges(edges)
387 _fixlongrightedges(edges)
588
388
589 # add_padding_line says whether to rewrite
389 # add_padding_line says whether to rewrite
590 #
390 #
591 # | | | | | | | |
391 # | | | | | | | |
592 # | o---+ into | o---+
392 # | o---+ into | o---+
593 # | / / | | | # <--- padding line
393 # | / / | | | # <--- padding line
594 # o | | | / /
394 # o | | | / /
595 # o | |
395 # o | |
596 add_padding_line = (len(text) > 2 and coldiff == -1 and
396 add_padding_line = (len(text) > 2 and coldiff == -1 and
597 [x for (x, y) in edges if x + 1 < y])
397 [x for (x, y) in edges if x + 1 < y])
598
398
599 # fix_nodeline_tail says whether to rewrite
399 # fix_nodeline_tail says whether to rewrite
600 #
400 #
601 # | | o | | | | o | |
401 # | | o | | | | o | |
602 # | | |/ / | | |/ /
402 # | | |/ / | | |/ /
603 # | o | | into | o / / # <--- fixed nodeline tail
403 # | o | | into | o / / # <--- fixed nodeline tail
604 # | |/ / | |/ /
404 # | |/ / | |/ /
605 # o | | o | |
405 # o | | o | |
606 fix_nodeline_tail = len(text) <= 2 and not add_padding_line
406 fix_nodeline_tail = len(text) <= 2 and not add_padding_line
607
407
608 # nodeline is the line containing the node character (typically o)
408 # nodeline is the line containing the node character (typically o)
609 nodeline = echars[:idx * 2]
409 nodeline = echars[:idx * 2]
610 nodeline.extend([char, " "])
410 nodeline.extend([char, " "])
611
411
612 nodeline.extend(
412 nodeline.extend(
613 _getnodelineedgestail(
413 _getnodelineedgestail(
614 echars, idx, state['lastindex'], ncols, coldiff,
414 echars, idx, state['lastindex'], ncols, coldiff,
615 state['lastcoldiff'], fix_nodeline_tail))
415 state['lastcoldiff'], fix_nodeline_tail))
616
416
617 # shift_interline is the line containing the non-vertical
417 # shift_interline is the line containing the non-vertical
618 # edges between this entry and the next
418 # edges between this entry and the next
619 shift_interline = echars[:idx * 2]
419 shift_interline = echars[:idx * 2]
620 shift_interline.extend(' ' * (2 + coldiff))
420 shift_interline.extend(' ' * (2 + coldiff))
621 count = ncols - idx - 1
421 count = ncols - idx - 1
622 if coldiff == -1:
422 if coldiff == -1:
623 shift_interline.extend('/ ' * count)
423 shift_interline.extend('/ ' * count)
624 elif coldiff == 0:
424 elif coldiff == 0:
625 shift_interline.extend(echars[(idx + 1) * 2:ncols * 2])
425 shift_interline.extend(echars[(idx + 1) * 2:ncols * 2])
626 else:
426 else:
627 shift_interline.extend(r'\ ' * count)
427 shift_interline.extend(r'\ ' * count)
628
428
629 # draw edges from the current node to its parents
429 # draw edges from the current node to its parents
630 _drawedges(echars, edges, nodeline, shift_interline)
430 _drawedges(echars, edges, nodeline, shift_interline)
631
431
632 # lines is the list of all graph lines to print
432 # lines is the list of all graph lines to print
633 lines = [nodeline]
433 lines = [nodeline]
634 if add_padding_line:
434 if add_padding_line:
635 lines.append(_getpaddingline(echars, idx, ncols, edges))
435 lines.append(_getpaddingline(echars, idx, ncols, edges))
636
436
637 # If 'graphshorten' config, only draw shift_interline
437 # If 'graphshorten' config, only draw shift_interline
638 # when there is any non vertical flow in graph.
438 # when there is any non vertical flow in graph.
639 if state['graphshorten']:
439 if state['graphshorten']:
640 if any(c in '\/' for c in shift_interline if c):
440 if any(c in '\/' for c in shift_interline if c):
641 lines.append(shift_interline)
441 lines.append(shift_interline)
642 # Else, no 'graphshorten' config so draw shift_interline.
442 # Else, no 'graphshorten' config so draw shift_interline.
643 else:
443 else:
644 lines.append(shift_interline)
444 lines.append(shift_interline)
645
445
646 # make sure that there are as many graph lines as there are
446 # make sure that there are as many graph lines as there are
647 # log strings
447 # log strings
648 extra_interline = echars[:(ncols + coldiff) * 2]
448 extra_interline = echars[:(ncols + coldiff) * 2]
649 if len(lines) < len(text):
449 if len(lines) < len(text):
650 while len(lines) < len(text):
450 while len(lines) < len(text):
651 lines.append(extra_interline[:])
451 lines.append(extra_interline[:])
652
452
653 _drawendinglines(lines, extra_interline, edgemap, seen)
453 _drawendinglines(lines, extra_interline, edgemap, seen)
654
454
655 while len(text) < len(lines):
455 while len(text) < len(lines):
656 text.append("")
456 text.append("")
657
457
658 if any(len(char) > 1 for char in edgemap.values()):
458 if any(len(char) > 1 for char in edgemap.values()):
659 # limit drawing an edge to the first or last N lines of the current
459 # limit drawing an edge to the first or last N lines of the current
660 # section the rest of the edge is drawn like a parent line.
460 # section the rest of the edge is drawn like a parent line.
661 parent = state['styles'][PARENT][-1]
461 parent = state['styles'][PARENT][-1]
662 def _drawgp(char, i):
462 def _drawgp(char, i):
663 # should a grandparent character be drawn for this line?
463 # should a grandparent character be drawn for this line?
664 if len(char) < 2:
464 if len(char) < 2:
665 return True
465 return True
666 num = int(char[:-1])
466 num = int(char[:-1])
667 # either skip first num lines or take last num lines, based on sign
467 # either skip first num lines or take last num lines, based on sign
668 return -num <= i if num < 0 else (len(lines) - i) <= num
468 return -num <= i if num < 0 else (len(lines) - i) <= num
669 for i, line in enumerate(lines):
469 for i, line in enumerate(lines):
670 line[:] = [c[-1] if _drawgp(c, i) else parent for c in line]
470 line[:] = [c[-1] if _drawgp(c, i) else parent for c in line]
671 edgemap.update(
471 edgemap.update(
672 (e, (c if len(c) < 2 else parent)) for e, c in edgemap.items())
472 (e, (c if len(c) < 2 else parent)) for e, c in edgemap.items())
673
473
674 # print lines
474 # print lines
675 indentation_level = max(ncols, ncols + coldiff)
475 indentation_level = max(ncols, ncols + coldiff)
676 for (line, logstr) in zip(lines, text):
476 for (line, logstr) in zip(lines, text):
677 ln = "%-*s %s" % (2 * indentation_level, "".join(line), logstr)
477 ln = "%-*s %s" % (2 * indentation_level, "".join(line), logstr)
678 ui.write(ln.rstrip() + '\n')
478 ui.write(ln.rstrip() + '\n')
679
479
680 # ... and start over
480 # ... and start over
681 state['lastcoldiff'] = coldiff
481 state['lastcoldiff'] = coldiff
682 state['lastindex'] = idx
482 state['lastindex'] = idx
@@ -1,3436 +1,3634 b''
1 # revset.py - revision set queries for mercurial
1 # revset.py - revision set queries for mercurial
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import heapq
10 import heapq
11 import re
11 import re
12
12
13 from .i18n import _
13 from .i18n import _
14 from . import (
14 from . import (
15 destutil,
15 destutil,
16 encoding,
16 encoding,
17 error,
17 error,
18 hbisect,
18 hbisect,
19 match as matchmod,
19 match as matchmod,
20 node,
20 node,
21 obsolete as obsmod,
21 obsolete as obsmod,
22 parser,
22 parser,
23 pathutil,
23 pathutil,
24 phases,
24 phases,
25 registrar,
25 registrar,
26 repoview,
26 repoview,
27 util,
27 util,
28 )
28 )
29
29
30 def _revancestors(repo, revs, followfirst):
30 def _revancestors(repo, revs, followfirst):
31 """Like revlog.ancestors(), but supports followfirst."""
31 """Like revlog.ancestors(), but supports followfirst."""
32 if followfirst:
32 if followfirst:
33 cut = 1
33 cut = 1
34 else:
34 else:
35 cut = None
35 cut = None
36 cl = repo.changelog
36 cl = repo.changelog
37
37
38 def iterate():
38 def iterate():
39 revs.sort(reverse=True)
39 revs.sort(reverse=True)
40 irevs = iter(revs)
40 irevs = iter(revs)
41 h = []
41 h = []
42
42
43 inputrev = next(irevs, None)
43 inputrev = next(irevs, None)
44 if inputrev is not None:
44 if inputrev is not None:
45 heapq.heappush(h, -inputrev)
45 heapq.heappush(h, -inputrev)
46
46
47 seen = set()
47 seen = set()
48 while h:
48 while h:
49 current = -heapq.heappop(h)
49 current = -heapq.heappop(h)
50 if current == inputrev:
50 if current == inputrev:
51 inputrev = next(irevs, None)
51 inputrev = next(irevs, None)
52 if inputrev is not None:
52 if inputrev is not None:
53 heapq.heappush(h, -inputrev)
53 heapq.heappush(h, -inputrev)
54 if current not in seen:
54 if current not in seen:
55 seen.add(current)
55 seen.add(current)
56 yield current
56 yield current
57 for parent in cl.parentrevs(current)[:cut]:
57 for parent in cl.parentrevs(current)[:cut]:
58 if parent != node.nullrev:
58 if parent != node.nullrev:
59 heapq.heappush(h, -parent)
59 heapq.heappush(h, -parent)
60
60
61 return generatorset(iterate(), iterasc=False)
61 return generatorset(iterate(), iterasc=False)
62
62
63 def _revdescendants(repo, revs, followfirst):
63 def _revdescendants(repo, revs, followfirst):
64 """Like revlog.descendants() but supports followfirst."""
64 """Like revlog.descendants() but supports followfirst."""
65 if followfirst:
65 if followfirst:
66 cut = 1
66 cut = 1
67 else:
67 else:
68 cut = None
68 cut = None
69
69
70 def iterate():
70 def iterate():
71 cl = repo.changelog
71 cl = repo.changelog
72 # XXX this should be 'parentset.min()' assuming 'parentset' is a
72 # XXX this should be 'parentset.min()' assuming 'parentset' is a
73 # smartset (and if it is not, it should.)
73 # smartset (and if it is not, it should.)
74 first = min(revs)
74 first = min(revs)
75 nullrev = node.nullrev
75 nullrev = node.nullrev
76 if first == nullrev:
76 if first == nullrev:
77 # Are there nodes with a null first parent and a non-null
77 # Are there nodes with a null first parent and a non-null
78 # second one? Maybe. Do we care? Probably not.
78 # second one? Maybe. Do we care? Probably not.
79 for i in cl:
79 for i in cl:
80 yield i
80 yield i
81 else:
81 else:
82 seen = set(revs)
82 seen = set(revs)
83 for i in cl.revs(first + 1):
83 for i in cl.revs(first + 1):
84 for x in cl.parentrevs(i)[:cut]:
84 for x in cl.parentrevs(i)[:cut]:
85 if x != nullrev and x in seen:
85 if x != nullrev and x in seen:
86 seen.add(i)
86 seen.add(i)
87 yield i
87 yield i
88 break
88 break
89
89
90 return generatorset(iterate(), iterasc=True)
90 return generatorset(iterate(), iterasc=True)
91
91
92 def _reachablerootspure(repo, minroot, roots, heads, includepath):
92 def _reachablerootspure(repo, minroot, roots, heads, includepath):
93 """return (heads(::<roots> and ::<heads>))
93 """return (heads(::<roots> and ::<heads>))
94
94
95 If includepath is True, return (<roots>::<heads>)."""
95 If includepath is True, return (<roots>::<heads>)."""
96 if not roots:
96 if not roots:
97 return []
97 return []
98 parentrevs = repo.changelog.parentrevs
98 parentrevs = repo.changelog.parentrevs
99 roots = set(roots)
99 roots = set(roots)
100 visit = list(heads)
100 visit = list(heads)
101 reachable = set()
101 reachable = set()
102 seen = {}
102 seen = {}
103 # prefetch all the things! (because python is slow)
103 # prefetch all the things! (because python is slow)
104 reached = reachable.add
104 reached = reachable.add
105 dovisit = visit.append
105 dovisit = visit.append
106 nextvisit = visit.pop
106 nextvisit = visit.pop
107 # open-code the post-order traversal due to the tiny size of
107 # open-code the post-order traversal due to the tiny size of
108 # sys.getrecursionlimit()
108 # sys.getrecursionlimit()
109 while visit:
109 while visit:
110 rev = nextvisit()
110 rev = nextvisit()
111 if rev in roots:
111 if rev in roots:
112 reached(rev)
112 reached(rev)
113 if not includepath:
113 if not includepath:
114 continue
114 continue
115 parents = parentrevs(rev)
115 parents = parentrevs(rev)
116 seen[rev] = parents
116 seen[rev] = parents
117 for parent in parents:
117 for parent in parents:
118 if parent >= minroot and parent not in seen:
118 if parent >= minroot and parent not in seen:
119 dovisit(parent)
119 dovisit(parent)
120 if not reachable:
120 if not reachable:
121 return baseset()
121 return baseset()
122 if not includepath:
122 if not includepath:
123 return reachable
123 return reachable
124 for rev in sorted(seen):
124 for rev in sorted(seen):
125 for parent in seen[rev]:
125 for parent in seen[rev]:
126 if parent in reachable:
126 if parent in reachable:
127 reached(rev)
127 reached(rev)
128 return reachable
128 return reachable
129
129
130 def reachableroots(repo, roots, heads, includepath=False):
130 def reachableroots(repo, roots, heads, includepath=False):
131 """return (heads(::<roots> and ::<heads>))
131 """return (heads(::<roots> and ::<heads>))
132
132
133 If includepath is True, return (<roots>::<heads>)."""
133 If includepath is True, return (<roots>::<heads>)."""
134 if not roots:
134 if not roots:
135 return baseset()
135 return baseset()
136 minroot = roots.min()
136 minroot = roots.min()
137 roots = list(roots)
137 roots = list(roots)
138 heads = list(heads)
138 heads = list(heads)
139 try:
139 try:
140 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
140 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
141 except AttributeError:
141 except AttributeError:
142 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
142 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
143 revs = baseset(revs)
143 revs = baseset(revs)
144 revs.sort()
144 revs.sort()
145 return revs
145 return revs
146
146
147 elements = {
147 elements = {
148 # token-type: binding-strength, primary, prefix, infix, suffix
148 # token-type: binding-strength, primary, prefix, infix, suffix
149 "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
149 "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
150 "##": (20, None, None, ("_concat", 20), None),
150 "##": (20, None, None, ("_concat", 20), None),
151 "~": (18, None, None, ("ancestor", 18), None),
151 "~": (18, None, None, ("ancestor", 18), None),
152 "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
152 "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
153 "-": (5, None, ("negate", 19), ("minus", 5), None),
153 "-": (5, None, ("negate", 19), ("minus", 5), None),
154 "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
154 "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
155 ("dagrangepost", 17)),
155 ("dagrangepost", 17)),
156 "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
156 "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
157 ("dagrangepost", 17)),
157 ("dagrangepost", 17)),
158 ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
158 ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
159 "not": (10, None, ("not", 10), None, None),
159 "not": (10, None, ("not", 10), None, None),
160 "!": (10, None, ("not", 10), None, None),
160 "!": (10, None, ("not", 10), None, None),
161 "and": (5, None, None, ("and", 5), None),
161 "and": (5, None, None, ("and", 5), None),
162 "&": (5, None, None, ("and", 5), None),
162 "&": (5, None, None, ("and", 5), None),
163 "%": (5, None, None, ("only", 5), ("onlypost", 5)),
163 "%": (5, None, None, ("only", 5), ("onlypost", 5)),
164 "or": (4, None, None, ("or", 4), None),
164 "or": (4, None, None, ("or", 4), None),
165 "|": (4, None, None, ("or", 4), None),
165 "|": (4, None, None, ("or", 4), None),
166 "+": (4, None, None, ("or", 4), None),
166 "+": (4, None, None, ("or", 4), None),
167 "=": (3, None, None, ("keyvalue", 3), None),
167 "=": (3, None, None, ("keyvalue", 3), None),
168 ",": (2, None, None, ("list", 2), None),
168 ",": (2, None, None, ("list", 2), None),
169 ")": (0, None, None, None, None),
169 ")": (0, None, None, None, None),
170 "symbol": (0, "symbol", None, None, None),
170 "symbol": (0, "symbol", None, None, None),
171 "string": (0, "string", None, None, None),
171 "string": (0, "string", None, None, None),
172 "end": (0, None, None, None, None),
172 "end": (0, None, None, None, None),
173 }
173 }
174
174
175 keywords = set(['and', 'or', 'not'])
175 keywords = set(['and', 'or', 'not'])
176
176
177 # default set of valid characters for the initial letter of symbols
177 # default set of valid characters for the initial letter of symbols
178 _syminitletters = set(c for c in [chr(i) for i in xrange(256)]
178 _syminitletters = set(c for c in [chr(i) for i in xrange(256)]
179 if c.isalnum() or c in '._@' or ord(c) > 127)
179 if c.isalnum() or c in '._@' or ord(c) > 127)
180
180
181 # default set of valid characters for non-initial letters of symbols
181 # default set of valid characters for non-initial letters of symbols
182 _symletters = set(c for c in [chr(i) for i in xrange(256)]
182 _symletters = set(c for c in [chr(i) for i in xrange(256)]
183 if c.isalnum() or c in '-._/@' or ord(c) > 127)
183 if c.isalnum() or c in '-._/@' or ord(c) > 127)
184
184
185 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
185 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
186 '''
186 '''
187 Parse a revset statement into a stream of tokens
187 Parse a revset statement into a stream of tokens
188
188
189 ``syminitletters`` is the set of valid characters for the initial
189 ``syminitletters`` is the set of valid characters for the initial
190 letter of symbols.
190 letter of symbols.
191
191
192 By default, character ``c`` is recognized as valid for initial
192 By default, character ``c`` is recognized as valid for initial
193 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
193 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
194
194
195 ``symletters`` is the set of valid characters for non-initial
195 ``symletters`` is the set of valid characters for non-initial
196 letters of symbols.
196 letters of symbols.
197
197
198 By default, character ``c`` is recognized as valid for non-initial
198 By default, character ``c`` is recognized as valid for non-initial
199 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
199 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
200
200
201 Check that @ is a valid unquoted token character (issue3686):
201 Check that @ is a valid unquoted token character (issue3686):
202 >>> list(tokenize("@::"))
202 >>> list(tokenize("@::"))
203 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
203 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
204
204
205 '''
205 '''
206 if syminitletters is None:
206 if syminitletters is None:
207 syminitletters = _syminitletters
207 syminitletters = _syminitletters
208 if symletters is None:
208 if symletters is None:
209 symletters = _symletters
209 symletters = _symletters
210
210
211 if program and lookup:
211 if program and lookup:
212 # attempt to parse old-style ranges first to deal with
212 # attempt to parse old-style ranges first to deal with
213 # things like old-tag which contain query metacharacters
213 # things like old-tag which contain query metacharacters
214 parts = program.split(':', 1)
214 parts = program.split(':', 1)
215 if all(lookup(sym) for sym in parts if sym):
215 if all(lookup(sym) for sym in parts if sym):
216 if parts[0]:
216 if parts[0]:
217 yield ('symbol', parts[0], 0)
217 yield ('symbol', parts[0], 0)
218 if len(parts) > 1:
218 if len(parts) > 1:
219 s = len(parts[0])
219 s = len(parts[0])
220 yield (':', None, s)
220 yield (':', None, s)
221 if parts[1]:
221 if parts[1]:
222 yield ('symbol', parts[1], s + 1)
222 yield ('symbol', parts[1], s + 1)
223 yield ('end', None, len(program))
223 yield ('end', None, len(program))
224 return
224 return
225
225
226 pos, l = 0, len(program)
226 pos, l = 0, len(program)
227 while pos < l:
227 while pos < l:
228 c = program[pos]
228 c = program[pos]
229 if c.isspace(): # skip inter-token whitespace
229 if c.isspace(): # skip inter-token whitespace
230 pass
230 pass
231 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
231 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
232 yield ('::', None, pos)
232 yield ('::', None, pos)
233 pos += 1 # skip ahead
233 pos += 1 # skip ahead
234 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
234 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
235 yield ('..', None, pos)
235 yield ('..', None, pos)
236 pos += 1 # skip ahead
236 pos += 1 # skip ahead
237 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
237 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
238 yield ('##', None, pos)
238 yield ('##', None, pos)
239 pos += 1 # skip ahead
239 pos += 1 # skip ahead
240 elif c in "():=,-|&+!~^%": # handle simple operators
240 elif c in "():=,-|&+!~^%": # handle simple operators
241 yield (c, None, pos)
241 yield (c, None, pos)
242 elif (c in '"\'' or c == 'r' and
242 elif (c in '"\'' or c == 'r' and
243 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
243 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
244 if c == 'r':
244 if c == 'r':
245 pos += 1
245 pos += 1
246 c = program[pos]
246 c = program[pos]
247 decode = lambda x: x
247 decode = lambda x: x
248 else:
248 else:
249 decode = parser.unescapestr
249 decode = parser.unescapestr
250 pos += 1
250 pos += 1
251 s = pos
251 s = pos
252 while pos < l: # find closing quote
252 while pos < l: # find closing quote
253 d = program[pos]
253 d = program[pos]
254 if d == '\\': # skip over escaped characters
254 if d == '\\': # skip over escaped characters
255 pos += 2
255 pos += 2
256 continue
256 continue
257 if d == c:
257 if d == c:
258 yield ('string', decode(program[s:pos]), s)
258 yield ('string', decode(program[s:pos]), s)
259 break
259 break
260 pos += 1
260 pos += 1
261 else:
261 else:
262 raise error.ParseError(_("unterminated string"), s)
262 raise error.ParseError(_("unterminated string"), s)
263 # gather up a symbol/keyword
263 # gather up a symbol/keyword
264 elif c in syminitletters:
264 elif c in syminitletters:
265 s = pos
265 s = pos
266 pos += 1
266 pos += 1
267 while pos < l: # find end of symbol
267 while pos < l: # find end of symbol
268 d = program[pos]
268 d = program[pos]
269 if d not in symletters:
269 if d not in symletters:
270 break
270 break
271 if d == '.' and program[pos - 1] == '.': # special case for ..
271 if d == '.' and program[pos - 1] == '.': # special case for ..
272 pos -= 1
272 pos -= 1
273 break
273 break
274 pos += 1
274 pos += 1
275 sym = program[s:pos]
275 sym = program[s:pos]
276 if sym in keywords: # operator keywords
276 if sym in keywords: # operator keywords
277 yield (sym, None, s)
277 yield (sym, None, s)
278 elif '-' in sym:
278 elif '-' in sym:
279 # some jerk gave us foo-bar-baz, try to check if it's a symbol
279 # some jerk gave us foo-bar-baz, try to check if it's a symbol
280 if lookup and lookup(sym):
280 if lookup and lookup(sym):
281 # looks like a real symbol
281 # looks like a real symbol
282 yield ('symbol', sym, s)
282 yield ('symbol', sym, s)
283 else:
283 else:
284 # looks like an expression
284 # looks like an expression
285 parts = sym.split('-')
285 parts = sym.split('-')
286 for p in parts[:-1]:
286 for p in parts[:-1]:
287 if p: # possible consecutive -
287 if p: # possible consecutive -
288 yield ('symbol', p, s)
288 yield ('symbol', p, s)
289 s += len(p)
289 s += len(p)
290 yield ('-', None, pos)
290 yield ('-', None, pos)
291 s += 1
291 s += 1
292 if parts[-1]: # possible trailing -
292 if parts[-1]: # possible trailing -
293 yield ('symbol', parts[-1], s)
293 yield ('symbol', parts[-1], s)
294 else:
294 else:
295 yield ('symbol', sym, s)
295 yield ('symbol', sym, s)
296 pos -= 1
296 pos -= 1
297 else:
297 else:
298 raise error.ParseError(_("syntax error in revset '%s'") %
298 raise error.ParseError(_("syntax error in revset '%s'") %
299 program, pos)
299 program, pos)
300 pos += 1
300 pos += 1
301 yield ('end', None, pos)
301 yield ('end', None, pos)
302
302
303 # helpers
303 # helpers
304
304
305 def getstring(x, err):
305 def getstring(x, err):
306 if x and (x[0] == 'string' or x[0] == 'symbol'):
306 if x and (x[0] == 'string' or x[0] == 'symbol'):
307 return x[1]
307 return x[1]
308 raise error.ParseError(err)
308 raise error.ParseError(err)
309
309
310 def getlist(x):
310 def getlist(x):
311 if not x:
311 if not x:
312 return []
312 return []
313 if x[0] == 'list':
313 if x[0] == 'list':
314 return list(x[1:])
314 return list(x[1:])
315 return [x]
315 return [x]
316
316
317 def getargs(x, min, max, err):
317 def getargs(x, min, max, err):
318 l = getlist(x)
318 l = getlist(x)
319 if len(l) < min or (max >= 0 and len(l) > max):
319 if len(l) < min or (max >= 0 and len(l) > max):
320 raise error.ParseError(err)
320 raise error.ParseError(err)
321 return l
321 return l
322
322
323 def getargsdict(x, funcname, keys):
323 def getargsdict(x, funcname, keys):
324 return parser.buildargsdict(getlist(x), funcname, keys.split(),
324 return parser.buildargsdict(getlist(x), funcname, keys.split(),
325 keyvaluenode='keyvalue', keynode='symbol')
325 keyvaluenode='keyvalue', keynode='symbol')
326
326
327 def getset(repo, subset, x):
327 def getset(repo, subset, x):
328 if not x:
328 if not x:
329 raise error.ParseError(_("missing argument"))
329 raise error.ParseError(_("missing argument"))
330 s = methods[x[0]](repo, subset, *x[1:])
330 s = methods[x[0]](repo, subset, *x[1:])
331 if util.safehasattr(s, 'isascending'):
331 if util.safehasattr(s, 'isascending'):
332 return s
332 return s
333 # else case should not happen, because all non-func are internal,
333 # else case should not happen, because all non-func are internal,
334 # ignoring for now.
334 # ignoring for now.
335 if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
335 if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
336 repo.ui.deprecwarn('revset "%s" uses list instead of smartset'
336 repo.ui.deprecwarn('revset "%s" uses list instead of smartset'
337 % x[1][1],
337 % x[1][1],
338 '3.9')
338 '3.9')
339 return baseset(s)
339 return baseset(s)
340
340
341 def _getrevsource(repo, r):
341 def _getrevsource(repo, r):
342 extra = repo[r].extra()
342 extra = repo[r].extra()
343 for label in ('source', 'transplant_source', 'rebase_source'):
343 for label in ('source', 'transplant_source', 'rebase_source'):
344 if label in extra:
344 if label in extra:
345 try:
345 try:
346 return repo[extra[label]].rev()
346 return repo[extra[label]].rev()
347 except error.RepoLookupError:
347 except error.RepoLookupError:
348 pass
348 pass
349 return None
349 return None
350
350
351 # operator methods
351 # operator methods
352
352
353 def stringset(repo, subset, x):
353 def stringset(repo, subset, x):
354 x = repo[x].rev()
354 x = repo[x].rev()
355 if (x in subset
355 if (x in subset
356 or x == node.nullrev and isinstance(subset, fullreposet)):
356 or x == node.nullrev and isinstance(subset, fullreposet)):
357 return baseset([x])
357 return baseset([x])
358 return baseset()
358 return baseset()
359
359
360 def rangeset(repo, subset, x, y):
360 def rangeset(repo, subset, x, y):
361 m = getset(repo, fullreposet(repo), x)
361 m = getset(repo, fullreposet(repo), x)
362 n = getset(repo, fullreposet(repo), y)
362 n = getset(repo, fullreposet(repo), y)
363
363
364 if not m or not n:
364 if not m or not n:
365 return baseset()
365 return baseset()
366 m, n = m.first(), n.last()
366 m, n = m.first(), n.last()
367
367
368 if m == n:
368 if m == n:
369 r = baseset([m])
369 r = baseset([m])
370 elif n == node.wdirrev:
370 elif n == node.wdirrev:
371 r = spanset(repo, m, len(repo)) + baseset([n])
371 r = spanset(repo, m, len(repo)) + baseset([n])
372 elif m == node.wdirrev:
372 elif m == node.wdirrev:
373 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
373 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
374 elif m < n:
374 elif m < n:
375 r = spanset(repo, m, n + 1)
375 r = spanset(repo, m, n + 1)
376 else:
376 else:
377 r = spanset(repo, m, n - 1)
377 r = spanset(repo, m, n - 1)
378 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
378 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
379 # necessary to ensure we preserve the order in subset.
379 # necessary to ensure we preserve the order in subset.
380 #
380 #
381 # This has performance implication, carrying the sorting over when possible
381 # This has performance implication, carrying the sorting over when possible
382 # would be more efficient.
382 # would be more efficient.
383 return r & subset
383 return r & subset
384
384
385 def dagrange(repo, subset, x, y):
385 def dagrange(repo, subset, x, y):
386 r = fullreposet(repo)
386 r = fullreposet(repo)
387 xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
387 xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
388 includepath=True)
388 includepath=True)
389 return subset & xs
389 return subset & xs
390
390
391 def andset(repo, subset, x, y):
391 def andset(repo, subset, x, y):
392 return getset(repo, getset(repo, subset, x), y)
392 return getset(repo, getset(repo, subset, x), y)
393
393
394 def differenceset(repo, subset, x, y):
394 def differenceset(repo, subset, x, y):
395 return getset(repo, subset, x) - getset(repo, subset, y)
395 return getset(repo, subset, x) - getset(repo, subset, y)
396
396
397 def orset(repo, subset, *xs):
397 def orset(repo, subset, *xs):
398 assert xs
398 assert xs
399 if len(xs) == 1:
399 if len(xs) == 1:
400 return getset(repo, subset, xs[0])
400 return getset(repo, subset, xs[0])
401 p = len(xs) // 2
401 p = len(xs) // 2
402 a = orset(repo, subset, *xs[:p])
402 a = orset(repo, subset, *xs[:p])
403 b = orset(repo, subset, *xs[p:])
403 b = orset(repo, subset, *xs[p:])
404 return a + b
404 return a + b
405
405
406 def notset(repo, subset, x):
406 def notset(repo, subset, x):
407 return subset - getset(repo, subset, x)
407 return subset - getset(repo, subset, x)
408
408
409 def listset(repo, subset, *xs):
409 def listset(repo, subset, *xs):
410 raise error.ParseError(_("can't use a list in this context"),
410 raise error.ParseError(_("can't use a list in this context"),
411 hint=_('see hg help "revsets.x or y"'))
411 hint=_('see hg help "revsets.x or y"'))
412
412
413 def keyvaluepair(repo, subset, k, v):
413 def keyvaluepair(repo, subset, k, v):
414 raise error.ParseError(_("can't use a key-value pair in this context"))
414 raise error.ParseError(_("can't use a key-value pair in this context"))
415
415
416 def func(repo, subset, a, b):
416 def func(repo, subset, a, b):
417 if a[0] == 'symbol' and a[1] in symbols:
417 if a[0] == 'symbol' and a[1] in symbols:
418 return symbols[a[1]](repo, subset, b)
418 return symbols[a[1]](repo, subset, b)
419
419
420 keep = lambda fn: getattr(fn, '__doc__', None) is not None
420 keep = lambda fn: getattr(fn, '__doc__', None) is not None
421
421
422 syms = [s for (s, fn) in symbols.items() if keep(fn)]
422 syms = [s for (s, fn) in symbols.items() if keep(fn)]
423 raise error.UnknownIdentifier(a[1], syms)
423 raise error.UnknownIdentifier(a[1], syms)
424
424
# functions

# symbols are callables like:
#   fn(repo, subset, x)
# with:
#   repo - current repository instance
#   subset - of revisions to be examined
#   x - argument in tree form
symbols = {}

# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
safesymbols = set()

# decorator registering each predicate function in `symbols` (and, when
# declared safe, in `safesymbols`) under its declared name
predicate = registrar.revsetpredicate()
441
441
@predicate('_destupdate')
def _destupdate(repo, subset, x):
    # experimental revset for update destination
    # (kept without a docstring on purpose: undocumented predicates are
    # excluded from help and error suggestions)
    #
    # Pass this predicate's own name to getargsdict so parse errors name
    # the right function; 'limit' here was a copy-paste artifact that
    # produced misleading error messages.
    args = getargsdict(x, '_destupdate', 'clean check')
    return subset & baseset([destutil.destupdate(repo, **args)[0]])
447
447
@predicate('_destmerge')
def _destmerge(repo, subset, x):
    # experimental revset for merge destination
    # (no docstring on purpose: hidden predicate)
    if x is None:
        sourceset = None
    else:
        sourceset = getset(repo, fullreposet(repo), x)
    dest = destutil.destmerge(repo, sourceset=sourceset)
    return subset & baseset([dest])
455
455
@predicate('adds(pattern)', safe=True)
def adds(repo, subset, x):
    """Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pattern = getstring(x, _("adds requires a pattern"))
    # field index 1 selects the 'added' entry of the status tuple
    return checkstatus(repo, subset, pattern, 1)
467
467
@predicate('ancestor(*changeset)', safe=True)
def ancestor(repo, subset, x):
    """A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    args = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # Fold the pairwise GCA over every revision of every argument set.
    for arg in args:
        for r in getset(repo, rl, arg):
            ctx = repo[r]
            if anc is None:
                anc = ctx
            else:
                anc = anc.ancestor(ctx)

    if anc is None or anc.rev() not in subset:
        return baseset()
    return baseset([anc.rev()])
492
492
def _ancestors(repo, subset, x, followfirst=False):
    """Shared implementation of ancestors()/_firstancestors()."""
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    return subset & _revancestors(repo, heads, followfirst)
499
499
@predicate('ancestors(set)', safe=True)
def ancestors(repo, subset, x):
    """Changesets that are ancestors of a changeset in set.
    """
    return _ancestors(repo, subset, x, followfirst=False)
505
505
@predicate('_firstancestors', safe=True)
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    # (no docstring on purpose: hidden predicate)
    return _ancestors(repo, subset, x, True)
511
511
def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    cl = repo.changelog
    ps = set()
    for r in getset(repo, fullreposet(repo), x):
        # walk n steps back along first parents only
        for _step in range(n):
            r = cl.parentrevs(r)[0]
        ps.add(r)
    return subset & ps
528
528
@predicate('author(string)', safe=True)
def author(repo, subset, x):
    """Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    needle = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(needle)

    def _matchuser(r):
        # compare case-insensitively against the full user field
        return matcher(encoding.lower(repo[r].user()))

    return subset.filter(_matchuser, condrepr=('<user %r>', needle))
538
538
@predicate('bisect(string)', safe=True)
def bisect(repo, subset, x):
    """Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads`` : csets topologically good/bad
    - ``range`` : csets taking part in the bisection
    - ``pruned`` : csets that are goods, bads or skipped
    - ``untested`` : csets whose fate is yet unknown
    - ``ignored`` : csets ignored due to DAG topology
    - ``current`` : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    marked = set(hbisect.get(repo, status))
    return subset & marked
555
555
# Backward-compatibility
# - no help entry so that we do not advertise it any more
@predicate('bisected', safe=True)
def bisected(repo, subset, x):
    # deprecated alias for bisect(); kept undocumented on purpose
    return bisect(repo, subset, x)
561
561
@predicate('bookmark([name])', safe=True)
def bookmark(repo, subset, x):
    """The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = util.stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            # exact name: a single dict lookup; error out when missing
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % pattern)
            bms.add(repo[bmrev].rev())
        else:
            # pattern (e.g. 're:'): scan every bookmark name for matches
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        # no argument: every bookmarked revision
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    # a bookmark may point at nullrev; never return it
    bms -= set([node.nullrev])
    return subset & bms
599
599
@predicate('branch(string or set)', safe=True)
def branch(repo, subset, x):
    """
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # getbi(rev) -> (branchname, closed) from the rev-branch cache
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = util.stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists and pattern kind is not specified explicitly
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]),
                                     condrepr=('<branch %r>', b))
            if b.startswith('literal:'):
                # explicit 'literal:' must not fall through to a revspec
                raise error.RepoLookupError(_("branch '%s' does not exist")
                                            % pattern)
        else:
            return subset.filter(lambda r: matcher(getbi(r)[0]),
                                 condrepr=('<branch %r>', b))

    # revspec case: select every revision on the same branch(es) as the
    # revisions matched by x (plus those revisions themselves)
    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
                         condrepr=lambda: '<branch %r>' % sorted(b))
639
639
@predicate('bumped()', safe=True)
def bumped(repo, subset, x):
    """Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    return subset & obsmod.getrevs(repo, 'bumped')
650
650
@predicate('bundle()', safe=True)
def bundle(repo, subset, x):
    """Changesets in the bundle.

    Bundle must be specified by the -R option."""

    # only a bundlerepo changelog carries 'bundlerevs'; a plain repo
    # does not, which is how we detect the missing -R bundle
    try:
        revsinbundle = repo.changelog.bundlerevs
    except AttributeError:
        raise error.Abort(_("no bundle provided - specify with -R"))
    return subset & revsinbundle
662
662
def checkstatus(repo, subset, pat, field):
    """Select revisions whose status list number `field` has a file
    matching `pat`.

    `field` indexes the repo.status() result tuple (e.g. adds() passes 1
    to test the added-files list).
    """
    hasset = matchmod.patkind(pat) == 'set'

    # one-slot matcher cache; fileset ('set:') patterns depend on the
    # changectx and must be rebuilt for every revision
    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            # fast path: the matcher names exactly one literal file
            fname = m.files()[0]
        if fname is not None:
            # cheap pre-filter: skip the status call when the file is not
            # even touched by this changeset
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                # no touched file matches the pattern at all
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True
        # implicit None (falsy) when nothing matched

    return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
694
694
def _children(repo, narrow, parentset):
    """Revisions of `narrow` that have at least one parent in `parentset`."""
    if not parentset:
        return baseset()
    cs = set()
    pr = repo.changelog.parentrevs
    minrev = parentset.min()
    for r in narrow:
        # a child always has a higher revision number than its parents
        if r <= minrev:
            continue
        if any(p in parentset for p in pr(r)):
            cs.add(r)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    return baseset(cs)
710
710
@predicate('children(set)', safe=True)
def children(repo, subset, x):
    """Child changesets of changesets in set.
    """
    parents = getset(repo, fullreposet(repo), x)
    return subset & _children(repo, subset, parents)
718
718
@predicate('closed()', safe=True)
def closed(repo, subset, x):
    """Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))

    def _isclosed(r):
        return repo[r].closesbranch()

    return subset.filter(_isclosed, condrepr='<branch closed>')
727
727
@predicate('contains(pattern)')
def contains(repo, subset, x):
    """The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            # plain path: a single direct manifest membership test suffices
            canon = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if canon in repo[x]:
                return True
            return False
        c = repo[x]
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        # real pattern: walk the whole manifest
        return any(m(f) for f in c.manifest())

    return subset.filter(matches, condrepr=('<contains %r>', pat))
754
754
@predicate('converted([id])', safe=True)
def converted(repo, subset, x):
    """Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """
    # The old-repo revision cannot be resolved here, so do a plain prefix
    # comparison against the recorded 'convert_revision' extra and hope
    # for the best.

    # i18n: "converted" is a keyword
    args = getargs(x, 0, 1, _('converted takes one or no arguments'))
    rev = None
    if args:
        # i18n: "converted" is a keyword
        rev = getstring(args[0], _('converted requires a revision'))

    def _isconverted(r):
        source = repo[r].extra().get('convert_revision', None)
        if source is None:
            return False
        return rev is None or source.startswith(rev)

    return subset.filter(_isconverted, condrepr=('<converted %r>', rev))
777
777
@predicate('date(interval)', safe=True)
def date(repo, subset, x):
    """Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    dm = util.matchdate(ds)

    def _indate(r):
        # date()[0] is the unix timestamp part of the (ts, tz) pair
        return dm(repo[r].date()[0])

    return subset.filter(_indate, condrepr=('<date %r>', ds))
787
787
@predicate('desc(string)', safe=True)
def desc(repo, subset, x):
    """Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    needle = encoding.lower(getstring(x, _("desc requires a string")))

    def _indescription(r):
        return needle in encoding.lower(repo[r].description())

    return subset.filter(_indescription, condrepr=('<desc %r>', needle))
800
800
def _descendants(repo, subset, x, followfirst=False):
    """Shared implementation of descendants()/_firstdescendants().

    Returns the subset members that are roots (matched by x) or their
    descendants, preserving subset's ordering.
    """
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        # unordered subset: re-intersect to recover subset's own ordering
        result = subset & result
    return result
819
819
@predicate('descendants(set)', safe=True)
def descendants(repo, subset, x):
    """Changesets which are descendants of changesets in set.
    """
    return _descendants(repo, subset, x, followfirst=False)
825
825
@predicate('_firstdescendants', safe=True)
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    # (no docstring on purpose: hidden predicate)
    return _descendants(repo, subset, x, True)
831
831
@predicate('destination([set])', safe=True)
def destination(repo, subset, x):
    """Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source. Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be. Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        # lineage collects the chain of candidate dests visited so far;
        # created lazily so revisions without a source allocate nothing
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set. Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset. Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__,
                         condrepr=lambda: '<destination %r>' % sorted(dests))
876
876
@predicate('divergent()', safe=True)
def divergent(repo, subset, x):
    """
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    return subset & obsmod.getrevs(repo, 'divergent')
886
886
@predicate('extinct()', safe=True)
def extinct(repo, subset, x):
    """Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    return subset & obsmod.getrevs(repo, 'extinct')
895
895
@predicate('extra(label, [value])', safe=True)
def extra(repo, subset, x):
    """Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        kind, value, matcher = util.stringmatcher(value)

    def _matchvalue(r):
        extra = repo[r].extra()
        # without a value argument, mere presence of the label matches
        return label in extra and (value is None or matcher(extra[label]))

    return subset.filter(lambda r: _matchvalue(r),
                         condrepr=('<extra[%r] %r>', label, value))
926
926
@predicate('filelog(pattern)', safe=True)
def filelog(repo, subset, x):
    """Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        # plain path: resolve it to a single canonical file name
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        fl = repo.file(f)
        known = {}
        scanpos = 0
        for fr in list(fl):
            fn = fl.node(fr)
            if fn in known:
                s.add(known[fn])
                continue

            lr = fl.linkrev(fr)
            if lr in cl:
                s.add(lr)
            elif scanpos is not None:
                # lowest matching changeset is filtered, scan further
                # ahead in changelog
                start = max(lr, scanpos) + 1
                scanpos = None
                for r in cl.revs(start):
                    # minimize parsing of non-matching entries
                    if f in cl.revision(r) and f in cl.readfiles(r):
                        try:
                            # try to use manifest delta fastpath
                            n = repo[r].filenode(f)
                            if n not in known:
                                if n == fn:
                                    s.add(r)
                                    scanpos = r
                                    break
                                else:
                                    known[n] = r
                        except error.ManifestLookupError:
                            # deletion in changelog
                            continue

    return subset & s
992
992
@predicate('first(set, [n])', safe=True)
def first(repo, subset, x):
    """An alias for limit().
    """
    return limit(repo, subset, x)
998
998
def _follow(repo, subset, x, name, followfirst=False):
    # Shared implementation for follow()/_followfirst(): with a pattern,
    # follow the history of matching files; without one, follow the
    # ancestors of the working directory's first parent.
    l = getargs(x, 0, 1, _("%s takes no arguments or a pattern") % name)
    c = repo['.']
    if l:
        x = getstring(l[0], _("%s expected a pattern") % name)
        matcher = matchmod.match(repo.root, repo.getcwd(), [x],
                                 ctx=repo[None], default='path')

        files = c.manifest().walk(matcher)

        s = set()
        for fname in files:
            fctx = c[fname]
            s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
            # include the revision responsible for the most recent version
            s.add(fctx.introrev())
    else:
        s = _revancestors(repo, baseset([c.rev()]), followfirst)

    return subset & s
1019
1019
@predicate('follow([pattern])', safe=True)
def follow(repo, subset, x):
    """
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If pattern is specified, the histories of files matching given
    pattern is followed, including copies.
    """
    return _follow(repo, subset, x, 'follow')
1028
1028
@predicate('_followfirst', safe=True)
def _followfirst(repo, subset, x):
    # ``followfirst([pattern])``
    # Like ``follow([pattern])`` but follows only the first parent of
    # every revisions or files revisions.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
1035
1035
@predicate('all()', safe=True)
def getall(repo, subset, x):
    """All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    return subset & spanset(repo)  # drop "null" if any
1043
1043
@predicate('grep(regex)')
def grep(repo, subset, x):
    """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        gr = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(x):
        # search changed files, user name and description of each changeset
        c = repo[x]
        for e in c.files() + [c.user(), c.description()]:
            if gr.search(e):
                return True
        return False

    return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
1064
1064
@predicate('_matchfiles', safe=True)
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        s = getstring(arg, "_matchfiles requires string arguments")
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'revision')
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'default mode')
            default = value
        else:
            raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    # This directly read the changelog data as creating changectx for all
    # revisions is quite expensive.
    getfiles = repo.changelog.readfiles
    wdirrev = node.wdirrev
    def matches(x):
        if x == wdirrev:
            files = repo[x].files()
        else:
            files = getfiles(x)
        for f in files:
            if m(f):
                return True
        return False

    return subset.filter(matches,
                         condrepr=('<matchfiles patterns=%r, include=%r '
                                   'exclude=%r, default=%r, rev=%r>',
                                   pats, inc, exc, default, rev))
1128
1128
@predicate('file(pattern)', safe=True)
def hasfile(repo, subset, x):
    """Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    return _matchfiles(repo, subset, ('string', 'p:' + pat))
1141
1141
@predicate('head()', safe=True)
def head(repo, subset, x):
    """Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    hs = set()
    cl = repo.changelog
    for b, ls in repo.branchmap().iteritems():
        hs.update(cl.rev(h) for h in ls)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return baseset(hs) & subset
1157
1157
@predicate('heads(set)', safe=True)
def heads(repo, subset, x):
    """Members of set with no children in set.
    """
    s = getset(repo, subset, x)
    ps = parents(repo, subset, x)
    return s - ps
1165
1165
@predicate('hidden()', safe=True)
def hidden(repo, subset, x):
    """Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    hiddenrevs = repoview.filterrevs(repo, 'visible')
    return subset & hiddenrevs
1174
1174
@predicate('keyword(string)', safe=True)
def keyword(repo, subset, x):
    """Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        c = repo[r]
        return any(kw in encoding.lower(t)
                   for t in c.files() + [c.user(), c.description()])

    return subset.filter(matches, condrepr=('<keyword %r>', kw))
1189
1189
@predicate('limit(set[, n[, offset]])', safe=True)
def limit(repo, subset, x):
    """First n members of set, defaulting to 1, starting from offset.
    """
    args = getargsdict(x, 'limit', 'set n offset')
    if 'set' not in args:
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit requires one to three arguments"))
    try:
        lim, ofs = 1, 0
        if 'n' in args:
            # i18n: "limit" is a keyword
            lim = int(getstring(args['n'], _("limit requires a number")))
        if 'offset' in args:
            # i18n: "limit" is a keyword
            ofs = int(getstring(args['offset'], _("limit requires a number")))
        if ofs < 0:
            raise error.ParseError(_("negative offset"))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    os = getset(repo, fullreposet(repo), args['set'])
    result = []
    it = iter(os)
    # skip the first 'ofs' members, then collect up to 'lim' members
    # that are also in the subset
    for x in xrange(ofs):
        y = next(it, None)
        if y is None:
            break
    for x in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
                                     lim, ofs, subset, os))
1226
1226
@predicate('last(set, [n])', safe=True)
def last(repo, subset, x):
    """Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
    except (TypeError, ValueError):
        # i18n: "last" is a keyword
        raise error.ParseError(_("last expects a number"))
    os = getset(repo, fullreposet(repo), l[0])
    os.reverse()
    result = []
    it = iter(os)
    # walk the reversed set and keep up to 'lim' members of the subset
    for x in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))
1252
1252
@predicate('max(set)', safe=True)
def maxrev(repo, subset, x):
    """Changeset with highest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.max()
        if m in subset:
            return baseset([m], datarepr=('<max %r, %r>', subset, os))
    except ValueError:
        # os.max() throws a ValueError when the collection is empty.
        # Same as python's max().
        pass
    return baseset(datarepr=('<max %r, %r>', subset, os))
1267
1267
@predicate('merge()', safe=True)
def merge(repo, subset, x):
    """Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    cl = repo.changelog
    # a merge has a non-null second parent
    return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
                         condrepr='<merge>')
1277
1277
@predicate('branchpoint()', safe=True)
def branchpoint(repo, subset, x):
    """Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    # count the children of each revision at or above the lowest candidate
    parentscount = [0]*(len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                parentscount[p - baserev] += 1
    return subset.filter(lambda r: parentscount[r - baserev] > 1,
                         condrepr='<branchpoint>')
1297
1297
@predicate('min(set)', safe=True)
def minrev(repo, subset, x):
    """Changeset with lowest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.min()
        if m in subset:
            return baseset([m], datarepr=('<min %r, %r>', subset, os))
    except ValueError:
        # os.min() throws a ValueError when the collection is empty.
        # Same as python's min().
        pass
    return baseset(datarepr=('<min %r, %r>', subset, os))
1312
1312
@predicate('modifies(pattern)', safe=True)
def modifies(repo, subset, x):
    """Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    return checkstatus(repo, subset, pat, 0)
1324
1324
@predicate('named(namespace)')
def named(repo, subset, x):
    """The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = util.stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    names -= set([node.nullrev])
    return subset & names
1362
1362
@predicate('id(string)', safe=True)
def node_(repo, subset, x):
    """Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    if len(n) == 40:
        # full 40-character hex node
        try:
            rn = repo.changelog.rev(node.bin(n))
        except (LookupError, TypeError):
            rn = None
    else:
        rn = None
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    result = baseset([rn])
    return result & subset
1386
1386
@predicate('obsolete()', safe=True)
def obsolete(repo, subset, x):
    """Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    obsoletes = obsmod.getrevs(repo, 'obsolete')
    return subset & obsoletes
1394
1394
@predicate('only(set, [set])', safe=True)
def only(repo, subset, x):
    """Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        # with no second set, exclude every head that neither belongs to
        # the included set nor descends from it
        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if rev not in descendants and rev not in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & results
1420
1420
@predicate('origin([set])', safe=True)
def origin(repo, subset, x):
    """
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is None:
        dests = fullreposet(repo)
    else:
        dests = getset(repo, fullreposet(repo), x)

    def _firstsrc(rev):
        # follow the recorded source chain back to its very first link
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        while True:
            prev = _getrevsource(repo, src)
            if prev is None:
                return src
            src = prev

    sources = set(_firstsrc(r) for r in dests)
    sources.discard(None)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & sources
1452
1452
@predicate('outgoing([path])', safe=True)
def outgoing(repo, subset, x):
    """Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    # no explicit path: fall back to the configured push path, then pull path
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # buffer ui output so the discovery chatter does not leak to the user
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    # map the missing nodes back to local revision numbers
    o = set([cl.rev(r) for r in outgoing.missing])
    return subset & o
1479
1479
@predicate('p1([set])', safe=True)
def p1(repo, subset, x):
    """First parent of changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context
        p = repo[x].p1().rev()
        if p < 0:
            return baseset()
        return subset & baseset([p])

    cl = repo.changelog
    ps = set(cl.parentrevs(r)[0]
             for r in getset(repo, fullreposet(repo), x))
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
1498
1498
@predicate('p2([set])', safe=True)
def p2(repo, subset, x):
    """Second parent of changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context
        wparents = repo[x].parents()
        try:
            p = wparents[1].rev()
        except IndexError:
            # no second parent: the working directory is not a merge
            return baseset()
        if p < 0:
            return baseset()
        return subset & baseset([p])

    cl = repo.changelog
    ps = set(cl.parentrevs(r)[1]
             for r in getset(repo, fullreposet(repo), x))
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
1521
1521
@predicate('parents([set])', safe=True)
def parents(repo, subset, x):
    """
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        revs = set(p.rev() for p in repo[x].parents())
    else:
        revs = set()
        cl = repo.changelog
        for r in getset(repo, fullreposet(repo), x):
            if r == node.wdirrev:
                # the working directory has no changelog entry; ask its
                # context object for the parents instead
                revs.update(p.rev() for p in repo[r].parents())
            else:
                revs.update(cl.parentrevs(r))
    revs.discard(node.nullrev)
    return subset & revs
1541
1541
def _phase(repo, subset, target):
    """helper to select all rev in phase <target>"""
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if not repo._phasecache._phasesets:
        # no precomputed phase sets available: test each candidate
        phase = repo._phasecache.phase
        return subset.filter(lambda r: phase(repo, r) == target,
                             condrepr=('<phase %r>', target),
                             cache=False)
    revs = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
    revs = baseset(revs)
    revs.sort() # set are non ordered, so we enforce ascending
    return subset & revs
1555
1555
@predicate('draft()', safe=True)
def draft(repo, subset, x):
    """Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    return _phase(repo, subset, phases.draft)
1563
1563
@predicate('secret()', safe=True)
def secret(repo, subset, x):
    """Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    return _phase(repo, subset, phases.secret)
1571
1571
def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            # ^0 selects the revision itself
            ps.add(r)
        elif n == 1:
            ps.add(cl.parentrevs(r)[0])
        else:
            # n == 2: only add a second parent when one exists
            parents = cl.parentrevs(r)
            if len(parents) > 1:
                ps.add(parents[1])
    return subset & ps
1596
1596
@predicate('present(set)', safe=True)
def present(repo, subset, x):
    """An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        result = getset(repo, subset, x)
    except error.RepoLookupError:
        # swallow the lookup failure and yield an empty result instead
        result = baseset()
    return result
1610
1610
# for internal use
@predicate('_notpublic', safe=True)
def _notpublic(repo, subset, x):
    getargs(x, 0, 0, "_notpublic takes no arguments")
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if not repo._phasecache._phasesets:
        # no precomputed phase sets: test each candidate's phase
        phase = repo._phasecache.phase
        target = phases.public
        return subset.filter(lambda r: phase(repo, r) != target,
                             condrepr=('<phase %r>', target),
                             cache=False)
    # union all phase sets past the first one (index 0 is the public set)
    revs = set()
    for u in repo._phasecache._phasesets[1:]:
        revs.update(u)
    revs = baseset(revs - repo.changelog.filteredrevs)
    revs.sort()
    return subset & revs
1629
1629
@predicate('public()', safe=True)
def public(repo, subset, x):
    """Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    phase = repo._phasecache.phase
    target = phases.public
    return subset.filter(lambda r: phase(repo, r) == target,
                         condrepr=('<phase %r>', target),
                         cache=False)
1640
1640
@predicate('remote([id [,path]])', safe=True)
def remote(repo, subset, x):
    """Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        # '.' is resolved to the branch of the working directory parent
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # ask the remote to resolve the identifier, then map the resulting
    # node back to a local revision if we have it
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
1675
1675
@predicate('removes(pattern)', safe=True)
def removes(repo, subset, x):
    """Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pattern = getstring(x, _("removes requires a pattern"))
    # status field 2 holds the removed files
    return checkstatus(repo, subset, pattern, 2)
1687
1687
@predicate('rev(number)', safe=True)
def rev(repo, subset, x):
    """Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    l = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        r = int(getstring(l[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    # nullrev is valid even though it is never stored in the changelog
    if r != node.nullrev and r not in repo.changelog:
        return baseset()
    return subset & baseset([r])
1703
1703
@predicate('matching(revision [, field])', safe=True)
def matching(repo, subset, x):
    """Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                # 'author' is an alias for 'user'
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
                  'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True),)
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        # x matches if all selected fields agree with at least one of the
        # reference revisions in 'revs'
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
            if match:
                return True
        return False

    return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1815
1815
@predicate('reverse(set)', safe=True)
def reverse(repo, subset, x):
    """Reverse order of set.
    """
    revs = getset(repo, subset, x)
    revs.reverse()
    return revs
1823
1823
@predicate('roots(set)', safe=True)
def roots(repo, subset, x):
    """Changesets in set with no parent changeset in set.
    """
    s = getset(repo, fullreposet(repo), x)
    parents = repo.changelog.parentrevs
    def filter(r):
        # a root has no parent inside the set; nullrev parents are ignored
        return not any(0 <= p and p in s for p in parents(r))
    return subset & s.filter(filter, condrepr='<roots>')
1836
1836
# map of sort key name -> function extracting that key from a changectx
_sortkeyfuncs = {
    'rev': lambda c: c.rev(),
    'branch': lambda c: c.branch(),
    'desc': lambda c: c.description(),
    'user': lambda c: c.user(),
    'author': lambda c: c.user(),  # 'author' is an alias for 'user'
    'date': lambda c: c.date()[0],
    }
1845
1845
@predicate('sort(set[, [-]key...])', safe=True)
def sort(repo, subset, x):
    """Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    """
    args = getargsdict(x, 'sort', 'set keys')
    if 'set' not in args:
        # i18n: "sort" is a keyword
        raise error.ParseError(_('sort requires one or two arguments'))
    keys = "rev"
    if 'keys' in args:
        # i18n: "sort" is a keyword
        keys = getstring(args['keys'], _("sort spec must be a string"))

    keys = keys.split()
    revs = getset(repo, subset, args['set'])
    # fast paths: plain revision ordering needs no changectx objects
    if keys == ["rev"]:
        revs.sort()
        return revs
    if keys == ["-rev"]:
        revs.sort(reverse=True)
        return revs
    # sort() is guaranteed to be stable, so sorting once per key from the
    # least significant key to the most significant one yields a
    # multi-key sort
    ctxs = [repo[r] for r in revs]
    for k in reversed(keys):
        fk = k
        descending = k.startswith('-')
        if descending:
            k = k[1:]
        try:
            ctxs.sort(key=_sortkeyfuncs[k], reverse=descending)
        except KeyError:
            raise error.ParseError(_("unknown sort key %r") % fk)
    return baseset([c.rev() for c in ctxs])
1889
1889
1890 def groupbranchiter(revs, parentsfunc, firstbranch=()):
1891 """Yield revisions from heads to roots one (topo) branch at a time.
1892
1893 This function aims to be used by a graph generator that wishes to minimize
1894 the number of parallel branches and their interleaving.
1895
1896 Example iteration order (numbers show the "true" order in a changelog):
1897
1898 o 4
1899 |
1900 o 1
1901 |
1902 | o 3
1903 | |
1904 | o 2
1905 |/
1906 o 0
1907
1908 Note that the ancestors of merges are understood by the current
1909 algorithm to be on the same branch. This means no reordering will
1910 occur behind a merge.
1911 """
1912
1913 ### Quick summary of the algorithm
1914 #
1915 # This function is based around a "retention" principle. We keep revisions
1916 # in memory until we are ready to emit a whole branch that immediately
1917 # "merges" into an existing one. This reduces the number of parallel
1918 # branches with interleaved revisions.
1919 #
1920 # During iteration revs are split into two groups:
1921 # A) revision already emitted
1922 # B) revision in "retention". They are stored as different subgroups.
1923 #
1924 # for each REV, we do the following logic:
1925 #
1926 # 1) if REV is a parent of (A), we will emit it. If there is a
1927 # retention group ((B) above) that is blocked on REV being
1928 # available, we emit all the revisions out of that retention
1929 # group first.
1930 #
1931 # 2) else, we'll search for a subgroup in (B) awaiting for REV to be
1932 # available, if such subgroup exist, we add REV to it and the subgroup is
1933 # now awaiting for REV.parents() to be available.
1934 #
1935 # 3) finally if no such group existed in (B), we create a new subgroup.
1936 #
1937 #
1938 # To bootstrap the algorithm, we emit the tipmost revision (which
1939 # puts it in group (A) from above).
1940
1941 revs.sort(reverse=True)
1942
1943 # Set of parents of revision that have been emitted. They can be considered
1944 # unblocked as the graph generator is already aware of them so there is no
1945 # need to delay the revisions that reference them.
1946 #
1947 # If someone wants to prioritize a branch over the others, pre-filling this
1948 # set will force all other branches to wait until this branch is ready to be
1949 # emitted.
1950 unblocked = set(firstbranch)
1951
1952 # list of groups waiting to be displayed, each group is defined by:
1953 #
1954 # (revs: lists of revs waiting to be displayed,
1955 # blocked: set of that cannot be displayed before those in 'revs')
1956 #
1957 # The second value ('blocked') correspond to parents of any revision in the
1958 # group ('revs') that is not itself contained in the group. The main idea
1959 # of this algorithm is to delay as much as possible the emission of any
1960 # revision. This means waiting for the moment we are about to display
1961 # these parents to display the revs in a group.
1962 #
1963 # This first implementation is smart until it encounters a merge: it will
1964 # emit revs as soon as any parent is about to be emitted and can grow an
1965 # arbitrary number of revs in 'blocked'. In practice this mean we properly
1966 # retains new branches but gives up on any special ordering for ancestors
1967 # of merges. The implementation can be improved to handle this better.
1968 #
1969 # The first subgroup is special. It corresponds to all the revision that
1970 # were already emitted. The 'revs' lists is expected to be empty and the
1971 # 'blocked' set contains the parents revisions of already emitted revision.
1972 #
1973 # You could pre-seed the <parents> set of groups[0] to a specific
1974 # changesets to select what the first emitted branch should be.
1975 groups = [([], unblocked)]
1976 pendingheap = []
1977 pendingset = set()
1978
1979 heapq.heapify(pendingheap)
1980 heappop = heapq.heappop
1981 heappush = heapq.heappush
1982 for currentrev in revs:
1983 # Heap works with smallest element, we want highest so we invert
1984 if currentrev not in pendingset:
1985 heappush(pendingheap, -currentrev)
1986 pendingset.add(currentrev)
1987 # iterates on pending rev until after the current rev have been
1988 # processed.
1989 rev = None
1990 while rev != currentrev:
1991 rev = -heappop(pendingheap)
1992 pendingset.remove(rev)
1993
1994 # Seek for a subgroup blocked, waiting for the current revision.
1995 matching = [i for i, g in enumerate(groups) if rev in g[1]]
1996
1997 if matching:
1998 # The main idea is to gather together all sets that are blocked
1999 # on the same revision.
2000 #
2001 # Groups are merged when a common blocking ancestor is
2002 # observed. For example, given two groups:
2003 #
2004 # revs [5, 4] waiting for 1
2005 # revs [3, 2] waiting for 1
2006 #
2007 # These two groups will be merged when we process
2008 # 1. In theory, we could have merged the groups when
2009 # we added 2 to the group it is now in (we could have
2010 # noticed the groups were both blocked on 1 then), but
2011 # the way it works now makes the algorithm simpler.
2012 #
2013 # We also always keep the oldest subgroup first. We can
2014 # probably improve the behavior by having the longest set
2015 # first. That way, graph algorithms could minimise the length
2016 # of parallel lines their drawing. This is currently not done.
2017 targetidx = matching.pop(0)
2018 trevs, tparents = groups[targetidx]
2019 for i in matching:
2020 gr = groups[i]
2021 trevs.extend(gr[0])
2022 tparents |= gr[1]
2023 # delete all merged subgroups (except the one we kept)
2024 # (starting from the last subgroup for performance and
2025 # sanity reasons)
2026 for i in reversed(matching):
2027 del groups[i]
2028 else:
2029 # This is a new head. We create a new subgroup for it.
2030 targetidx = len(groups)
2031 groups.append(([], set([rev])))
2032
2033 gr = groups[targetidx]
2034
2035 # We now add the current nodes to this subgroups. This is done
2036 # after the subgroup merging because all elements from a subgroup
2037 # that relied on this rev must precede it.
2038 #
2039 # we also update the <parents> set to include the parents of the
2040 # new nodes.
2041 if rev == currentrev: # only display stuff in rev
2042 gr[0].append(rev)
2043 gr[1].remove(rev)
2044 parents = [p for p in parentsfunc(rev) if p > node.nullrev]
2045 gr[1].update(parents)
2046 for p in parents:
2047 if p not in pendingset:
2048 pendingset.add(p)
2049 heappush(pendingheap, -p)
2050
2051 # Look for a subgroup to display
2052 #
2053 # When unblocked is empty (if clause), we were not waiting for any
2054 # revisions during the first iteration (if no priority was given) or
2055 # if we emitted a whole disconnected set of the graph (reached a
2056 # root). In that case we arbitrarily take the oldest known
2057 # subgroup. The heuristic could probably be better.
2058 #
2059 # Otherwise (elif clause) if the subgroup is blocked on
2060 # a revision we just emitted, we can safely emit it as
2061 # well.
2062 if not unblocked:
2063 if len(groups) > 1: # display other subset
2064 targetidx = 1
2065 gr = groups[1]
2066 elif not gr[1] & unblocked:
2067 gr = None
2068
2069 if gr is not None:
2070 # update the set of awaited revisions with the one from the
2071 # subgroup
2072 unblocked |= gr[1]
2073 # output all revisions in the subgroup
2074 for r in gr[0]:
2075 yield r
2076 # delete the subgroup that you just output
2077 # unless it is groups[0] in which case you just empty it.
2078 if targetidx:
2079 del groups[targetidx]
2080 else:
2081 gr[0][:] = []
2082 # Check if we have some subgroup waiting for revisions we are not going to
2083 # iterate over
2084 for g in groups:
2085 for r in g[0]:
2086 yield r
2087
1890 @predicate('subrepo([pattern])')
2088 @predicate('subrepo([pattern])')
1891 def subrepo(repo, subset, x):
2089 def subrepo(repo, subset, x):
1892 """Changesets that add, modify or remove the given subrepo. If no subrepo
2090 """Changesets that add, modify or remove the given subrepo. If no subrepo
1893 pattern is named, any subrepo changes are returned.
2091 pattern is named, any subrepo changes are returned.
1894 """
2092 """
1895 # i18n: "subrepo" is a keyword
2093 # i18n: "subrepo" is a keyword
1896 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
2094 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
1897 pat = None
2095 pat = None
1898 if len(args) != 0:
2096 if len(args) != 0:
1899 pat = getstring(args[0], _("subrepo requires a pattern"))
2097 pat = getstring(args[0], _("subrepo requires a pattern"))
1900
2098
1901 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
2099 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
1902
2100
1903 def submatches(names):
2101 def submatches(names):
1904 k, p, m = util.stringmatcher(pat)
2102 k, p, m = util.stringmatcher(pat)
1905 for name in names:
2103 for name in names:
1906 if m(name):
2104 if m(name):
1907 yield name
2105 yield name
1908
2106
1909 def matches(x):
2107 def matches(x):
1910 c = repo[x]
2108 c = repo[x]
1911 s = repo.status(c.p1().node(), c.node(), match=m)
2109 s = repo.status(c.p1().node(), c.node(), match=m)
1912
2110
1913 if pat is None:
2111 if pat is None:
1914 return s.added or s.modified or s.removed
2112 return s.added or s.modified or s.removed
1915
2113
1916 if s.added:
2114 if s.added:
1917 return any(submatches(c.substate.keys()))
2115 return any(submatches(c.substate.keys()))
1918
2116
1919 if s.modified:
2117 if s.modified:
1920 subs = set(c.p1().substate.keys())
2118 subs = set(c.p1().substate.keys())
1921 subs.update(c.substate.keys())
2119 subs.update(c.substate.keys())
1922
2120
1923 for path in submatches(subs):
2121 for path in submatches(subs):
1924 if c.p1().substate.get(path) != c.substate.get(path):
2122 if c.p1().substate.get(path) != c.substate.get(path):
1925 return True
2123 return True
1926
2124
1927 if s.removed:
2125 if s.removed:
1928 return any(submatches(c.p1().substate.keys()))
2126 return any(submatches(c.p1().substate.keys()))
1929
2127
1930 return False
2128 return False
1931
2129
1932 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
2130 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
1933
2131
1934 def _substringmatcher(pattern):
2132 def _substringmatcher(pattern):
1935 kind, pattern, matcher = util.stringmatcher(pattern)
2133 kind, pattern, matcher = util.stringmatcher(pattern)
1936 if kind == 'literal':
2134 if kind == 'literal':
1937 matcher = lambda s: pattern in s
2135 matcher = lambda s: pattern in s
1938 return kind, pattern, matcher
2136 return kind, pattern, matcher
1939
2137
1940 @predicate('tag([name])', safe=True)
2138 @predicate('tag([name])', safe=True)
1941 def tag(repo, subset, x):
2139 def tag(repo, subset, x):
1942 """The specified tag by name, or all tagged revisions if no name is given.
2140 """The specified tag by name, or all tagged revisions if no name is given.
1943
2141
1944 If `name` starts with `re:`, the remainder of the name is treated as
2142 If `name` starts with `re:`, the remainder of the name is treated as
1945 a regular expression. To match a tag that actually starts with `re:`,
2143 a regular expression. To match a tag that actually starts with `re:`,
1946 use the prefix `literal:`.
2144 use the prefix `literal:`.
1947 """
2145 """
1948 # i18n: "tag" is a keyword
2146 # i18n: "tag" is a keyword
1949 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
2147 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
1950 cl = repo.changelog
2148 cl = repo.changelog
1951 if args:
2149 if args:
1952 pattern = getstring(args[0],
2150 pattern = getstring(args[0],
1953 # i18n: "tag" is a keyword
2151 # i18n: "tag" is a keyword
1954 _('the argument to tag must be a string'))
2152 _('the argument to tag must be a string'))
1955 kind, pattern, matcher = util.stringmatcher(pattern)
2153 kind, pattern, matcher = util.stringmatcher(pattern)
1956 if kind == 'literal':
2154 if kind == 'literal':
1957 # avoid resolving all tags
2155 # avoid resolving all tags
1958 tn = repo._tagscache.tags.get(pattern, None)
2156 tn = repo._tagscache.tags.get(pattern, None)
1959 if tn is None:
2157 if tn is None:
1960 raise error.RepoLookupError(_("tag '%s' does not exist")
2158 raise error.RepoLookupError(_("tag '%s' does not exist")
1961 % pattern)
2159 % pattern)
1962 s = set([repo[tn].rev()])
2160 s = set([repo[tn].rev()])
1963 else:
2161 else:
1964 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
2162 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
1965 else:
2163 else:
1966 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
2164 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
1967 return subset & s
2165 return subset & s
1968
2166
1969 @predicate('tagged', safe=True)
2167 @predicate('tagged', safe=True)
1970 def tagged(repo, subset, x):
2168 def tagged(repo, subset, x):
1971 return tag(repo, subset, x)
2169 return tag(repo, subset, x)
1972
2170
1973 @predicate('unstable()', safe=True)
2171 @predicate('unstable()', safe=True)
1974 def unstable(repo, subset, x):
2172 def unstable(repo, subset, x):
1975 """Non-obsolete changesets with obsolete ancestors.
2173 """Non-obsolete changesets with obsolete ancestors.
1976 """
2174 """
1977 # i18n: "unstable" is a keyword
2175 # i18n: "unstable" is a keyword
1978 getargs(x, 0, 0, _("unstable takes no arguments"))
2176 getargs(x, 0, 0, _("unstable takes no arguments"))
1979 unstables = obsmod.getrevs(repo, 'unstable')
2177 unstables = obsmod.getrevs(repo, 'unstable')
1980 return subset & unstables
2178 return subset & unstables
1981
2179
1982
2180
1983 @predicate('user(string)', safe=True)
2181 @predicate('user(string)', safe=True)
1984 def user(repo, subset, x):
2182 def user(repo, subset, x):
1985 """User name contains string. The match is case-insensitive.
2183 """User name contains string. The match is case-insensitive.
1986
2184
1987 If `string` starts with `re:`, the remainder of the string is treated as
2185 If `string` starts with `re:`, the remainder of the string is treated as
1988 a regular expression. To match a user that actually contains `re:`, use
2186 a regular expression. To match a user that actually contains `re:`, use
1989 the prefix `literal:`.
2187 the prefix `literal:`.
1990 """
2188 """
1991 return author(repo, subset, x)
2189 return author(repo, subset, x)
1992
2190
1993 # experimental
2191 # experimental
1994 @predicate('wdir', safe=True)
2192 @predicate('wdir', safe=True)
1995 def wdir(repo, subset, x):
2193 def wdir(repo, subset, x):
1996 # i18n: "wdir" is a keyword
2194 # i18n: "wdir" is a keyword
1997 getargs(x, 0, 0, _("wdir takes no arguments"))
2195 getargs(x, 0, 0, _("wdir takes no arguments"))
1998 if node.wdirrev in subset or isinstance(subset, fullreposet):
2196 if node.wdirrev in subset or isinstance(subset, fullreposet):
1999 return baseset([node.wdirrev])
2197 return baseset([node.wdirrev])
2000 return baseset()
2198 return baseset()
2001
2199
2002 # for internal use
2200 # for internal use
2003 @predicate('_list', safe=True)
2201 @predicate('_list', safe=True)
2004 def _list(repo, subset, x):
2202 def _list(repo, subset, x):
2005 s = getstring(x, "internal error")
2203 s = getstring(x, "internal error")
2006 if not s:
2204 if not s:
2007 return baseset()
2205 return baseset()
2008 # remove duplicates here. it's difficult for caller to deduplicate sets
2206 # remove duplicates here. it's difficult for caller to deduplicate sets
2009 # because different symbols can point to the same rev.
2207 # because different symbols can point to the same rev.
2010 cl = repo.changelog
2208 cl = repo.changelog
2011 ls = []
2209 ls = []
2012 seen = set()
2210 seen = set()
2013 for t in s.split('\0'):
2211 for t in s.split('\0'):
2014 try:
2212 try:
2015 # fast path for integer revision
2213 # fast path for integer revision
2016 r = int(t)
2214 r = int(t)
2017 if str(r) != t or r not in cl:
2215 if str(r) != t or r not in cl:
2018 raise ValueError
2216 raise ValueError
2019 revs = [r]
2217 revs = [r]
2020 except ValueError:
2218 except ValueError:
2021 revs = stringset(repo, subset, t)
2219 revs = stringset(repo, subset, t)
2022
2220
2023 for r in revs:
2221 for r in revs:
2024 if r in seen:
2222 if r in seen:
2025 continue
2223 continue
2026 if (r in subset
2224 if (r in subset
2027 or r == node.nullrev and isinstance(subset, fullreposet)):
2225 or r == node.nullrev and isinstance(subset, fullreposet)):
2028 ls.append(r)
2226 ls.append(r)
2029 seen.add(r)
2227 seen.add(r)
2030 return baseset(ls)
2228 return baseset(ls)
2031
2229
2032 # for internal use
2230 # for internal use
2033 @predicate('_intlist', safe=True)
2231 @predicate('_intlist', safe=True)
2034 def _intlist(repo, subset, x):
2232 def _intlist(repo, subset, x):
2035 s = getstring(x, "internal error")
2233 s = getstring(x, "internal error")
2036 if not s:
2234 if not s:
2037 return baseset()
2235 return baseset()
2038 ls = [int(r) for r in s.split('\0')]
2236 ls = [int(r) for r in s.split('\0')]
2039 s = subset
2237 s = subset
2040 return baseset([r for r in ls if r in s])
2238 return baseset([r for r in ls if r in s])
2041
2239
2042 # for internal use
2240 # for internal use
2043 @predicate('_hexlist', safe=True)
2241 @predicate('_hexlist', safe=True)
2044 def _hexlist(repo, subset, x):
2242 def _hexlist(repo, subset, x):
2045 s = getstring(x, "internal error")
2243 s = getstring(x, "internal error")
2046 if not s:
2244 if not s:
2047 return baseset()
2245 return baseset()
2048 cl = repo.changelog
2246 cl = repo.changelog
2049 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2247 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2050 s = subset
2248 s = subset
2051 return baseset([r for r in ls if r in s])
2249 return baseset([r for r in ls if r in s])
2052
2250
2053 methods = {
2251 methods = {
2054 "range": rangeset,
2252 "range": rangeset,
2055 "dagrange": dagrange,
2253 "dagrange": dagrange,
2056 "string": stringset,
2254 "string": stringset,
2057 "symbol": stringset,
2255 "symbol": stringset,
2058 "and": andset,
2256 "and": andset,
2059 "or": orset,
2257 "or": orset,
2060 "not": notset,
2258 "not": notset,
2061 "difference": differenceset,
2259 "difference": differenceset,
2062 "list": listset,
2260 "list": listset,
2063 "keyvalue": keyvaluepair,
2261 "keyvalue": keyvaluepair,
2064 "func": func,
2262 "func": func,
2065 "ancestor": ancestorspec,
2263 "ancestor": ancestorspec,
2066 "parent": parentspec,
2264 "parent": parentspec,
2067 "parentpost": p1,
2265 "parentpost": p1,
2068 }
2266 }
2069
2267
2070 def _matchonly(revs, bases):
2268 def _matchonly(revs, bases):
2071 """
2269 """
2072 >>> f = lambda *args: _matchonly(*map(parse, args))
2270 >>> f = lambda *args: _matchonly(*map(parse, args))
2073 >>> f('ancestors(A)', 'not ancestors(B)')
2271 >>> f('ancestors(A)', 'not ancestors(B)')
2074 ('list', ('symbol', 'A'), ('symbol', 'B'))
2272 ('list', ('symbol', 'A'), ('symbol', 'B'))
2075 """
2273 """
2076 if (revs is not None
2274 if (revs is not None
2077 and revs[0] == 'func'
2275 and revs[0] == 'func'
2078 and getstring(revs[1], _('not a symbol')) == 'ancestors'
2276 and getstring(revs[1], _('not a symbol')) == 'ancestors'
2079 and bases is not None
2277 and bases is not None
2080 and bases[0] == 'not'
2278 and bases[0] == 'not'
2081 and bases[1][0] == 'func'
2279 and bases[1][0] == 'func'
2082 and getstring(bases[1][1], _('not a symbol')) == 'ancestors'):
2280 and getstring(bases[1][1], _('not a symbol')) == 'ancestors'):
2083 return ('list', revs[2], bases[1][2])
2281 return ('list', revs[2], bases[1][2])
2084
2282
2085 def _optimize(x, small):
2283 def _optimize(x, small):
2086 if x is None:
2284 if x is None:
2087 return 0, x
2285 return 0, x
2088
2286
2089 smallbonus = 1
2287 smallbonus = 1
2090 if small:
2288 if small:
2091 smallbonus = .5
2289 smallbonus = .5
2092
2290
2093 op = x[0]
2291 op = x[0]
2094 if op == 'minus':
2292 if op == 'minus':
2095 return _optimize(('and', x[1], ('not', x[2])), small)
2293 return _optimize(('and', x[1], ('not', x[2])), small)
2096 elif op == 'only':
2294 elif op == 'only':
2097 t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
2295 t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
2098 return _optimize(t, small)
2296 return _optimize(t, small)
2099 elif op == 'onlypost':
2297 elif op == 'onlypost':
2100 return _optimize(('func', ('symbol', 'only'), x[1]), small)
2298 return _optimize(('func', ('symbol', 'only'), x[1]), small)
2101 elif op == 'dagrangepre':
2299 elif op == 'dagrangepre':
2102 return _optimize(('func', ('symbol', 'ancestors'), x[1]), small)
2300 return _optimize(('func', ('symbol', 'ancestors'), x[1]), small)
2103 elif op == 'dagrangepost':
2301 elif op == 'dagrangepost':
2104 return _optimize(('func', ('symbol', 'descendants'), x[1]), small)
2302 return _optimize(('func', ('symbol', 'descendants'), x[1]), small)
2105 elif op == 'rangeall':
2303 elif op == 'rangeall':
2106 return _optimize(('range', ('string', '0'), ('string', 'tip')), small)
2304 return _optimize(('range', ('string', '0'), ('string', 'tip')), small)
2107 elif op == 'rangepre':
2305 elif op == 'rangepre':
2108 return _optimize(('range', ('string', '0'), x[1]), small)
2306 return _optimize(('range', ('string', '0'), x[1]), small)
2109 elif op == 'rangepost':
2307 elif op == 'rangepost':
2110 return _optimize(('range', x[1], ('string', 'tip')), small)
2308 return _optimize(('range', x[1], ('string', 'tip')), small)
2111 elif op == 'negate':
2309 elif op == 'negate':
2112 s = getstring(x[1], _("can't negate that"))
2310 s = getstring(x[1], _("can't negate that"))
2113 return _optimize(('string', '-' + s), small)
2311 return _optimize(('string', '-' + s), small)
2114 elif op in 'string symbol negate':
2312 elif op in 'string symbol negate':
2115 return smallbonus, x # single revisions are small
2313 return smallbonus, x # single revisions are small
2116 elif op == 'and':
2314 elif op == 'and':
2117 wa, ta = _optimize(x[1], True)
2315 wa, ta = _optimize(x[1], True)
2118 wb, tb = _optimize(x[2], True)
2316 wb, tb = _optimize(x[2], True)
2119 w = min(wa, wb)
2317 w = min(wa, wb)
2120
2318
2121 # (::x and not ::y)/(not ::y and ::x) have a fast path
2319 # (::x and not ::y)/(not ::y and ::x) have a fast path
2122 tm = _matchonly(ta, tb) or _matchonly(tb, ta)
2320 tm = _matchonly(ta, tb) or _matchonly(tb, ta)
2123 if tm:
2321 if tm:
2124 return w, ('func', ('symbol', 'only'), tm)
2322 return w, ('func', ('symbol', 'only'), tm)
2125
2323
2126 if tb is not None and tb[0] == 'not':
2324 if tb is not None and tb[0] == 'not':
2127 return wa, ('difference', ta, tb[1])
2325 return wa, ('difference', ta, tb[1])
2128
2326
2129 if wa > wb:
2327 if wa > wb:
2130 return w, (op, tb, ta)
2328 return w, (op, tb, ta)
2131 return w, (op, ta, tb)
2329 return w, (op, ta, tb)
2132 elif op == 'or':
2330 elif op == 'or':
2133 # fast path for machine-generated expression, that is likely to have
2331 # fast path for machine-generated expression, that is likely to have
2134 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
2332 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
2135 ws, ts, ss = [], [], []
2333 ws, ts, ss = [], [], []
2136 def flushss():
2334 def flushss():
2137 if not ss:
2335 if not ss:
2138 return
2336 return
2139 if len(ss) == 1:
2337 if len(ss) == 1:
2140 w, t = ss[0]
2338 w, t = ss[0]
2141 else:
2339 else:
2142 s = '\0'.join(t[1] for w, t in ss)
2340 s = '\0'.join(t[1] for w, t in ss)
2143 y = ('func', ('symbol', '_list'), ('string', s))
2341 y = ('func', ('symbol', '_list'), ('string', s))
2144 w, t = _optimize(y, False)
2342 w, t = _optimize(y, False)
2145 ws.append(w)
2343 ws.append(w)
2146 ts.append(t)
2344 ts.append(t)
2147 del ss[:]
2345 del ss[:]
2148 for y in x[1:]:
2346 for y in x[1:]:
2149 w, t = _optimize(y, False)
2347 w, t = _optimize(y, False)
2150 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
2348 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
2151 ss.append((w, t))
2349 ss.append((w, t))
2152 continue
2350 continue
2153 flushss()
2351 flushss()
2154 ws.append(w)
2352 ws.append(w)
2155 ts.append(t)
2353 ts.append(t)
2156 flushss()
2354 flushss()
2157 if len(ts) == 1:
2355 if len(ts) == 1:
2158 return ws[0], ts[0] # 'or' operation is fully optimized out
2356 return ws[0], ts[0] # 'or' operation is fully optimized out
2159 # we can't reorder trees by weight because it would change the order.
2357 # we can't reorder trees by weight because it would change the order.
2160 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2358 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2161 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2359 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2162 return max(ws), (op,) + tuple(ts)
2360 return max(ws), (op,) + tuple(ts)
2163 elif op == 'not':
2361 elif op == 'not':
2164 # Optimize not public() to _notpublic() because we have a fast version
2362 # Optimize not public() to _notpublic() because we have a fast version
2165 if x[1] == ('func', ('symbol', 'public'), None):
2363 if x[1] == ('func', ('symbol', 'public'), None):
2166 newsym = ('func', ('symbol', '_notpublic'), None)
2364 newsym = ('func', ('symbol', '_notpublic'), None)
2167 o = _optimize(newsym, not small)
2365 o = _optimize(newsym, not small)
2168 return o[0], o[1]
2366 return o[0], o[1]
2169 else:
2367 else:
2170 o = _optimize(x[1], not small)
2368 o = _optimize(x[1], not small)
2171 return o[0], (op, o[1])
2369 return o[0], (op, o[1])
2172 elif op == 'parentpost':
2370 elif op == 'parentpost':
2173 o = _optimize(x[1], small)
2371 o = _optimize(x[1], small)
2174 return o[0], (op, o[1])
2372 return o[0], (op, o[1])
2175 elif op == 'group':
2373 elif op == 'group':
2176 return _optimize(x[1], small)
2374 return _optimize(x[1], small)
2177 elif op in 'dagrange range parent ancestorspec':
2375 elif op in 'dagrange range parent ancestorspec':
2178 if op == 'parent':
2376 if op == 'parent':
2179 # x^:y means (x^) : y, not x ^ (:y)
2377 # x^:y means (x^) : y, not x ^ (:y)
2180 post = ('parentpost', x[1])
2378 post = ('parentpost', x[1])
2181 if x[2][0] == 'dagrangepre':
2379 if x[2][0] == 'dagrangepre':
2182 return _optimize(('dagrange', post, x[2][1]), small)
2380 return _optimize(('dagrange', post, x[2][1]), small)
2183 elif x[2][0] == 'rangepre':
2381 elif x[2][0] == 'rangepre':
2184 return _optimize(('range', post, x[2][1]), small)
2382 return _optimize(('range', post, x[2][1]), small)
2185
2383
2186 wa, ta = _optimize(x[1], small)
2384 wa, ta = _optimize(x[1], small)
2187 wb, tb = _optimize(x[2], small)
2385 wb, tb = _optimize(x[2], small)
2188 return wa + wb, (op, ta, tb)
2386 return wa + wb, (op, ta, tb)
2189 elif op == 'list':
2387 elif op == 'list':
2190 ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
2388 ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
2191 return sum(ws), (op,) + ts
2389 return sum(ws), (op,) + ts
2192 elif op == 'func':
2390 elif op == 'func':
2193 f = getstring(x[1], _("not a symbol"))
2391 f = getstring(x[1], _("not a symbol"))
2194 wa, ta = _optimize(x[2], small)
2392 wa, ta = _optimize(x[2], small)
2195 if f in ("author branch closed date desc file grep keyword "
2393 if f in ("author branch closed date desc file grep keyword "
2196 "outgoing user"):
2394 "outgoing user"):
2197 w = 10 # slow
2395 w = 10 # slow
2198 elif f in "modifies adds removes":
2396 elif f in "modifies adds removes":
2199 w = 30 # slower
2397 w = 30 # slower
2200 elif f == "contains":
2398 elif f == "contains":
2201 w = 100 # very slow
2399 w = 100 # very slow
2202 elif f == "ancestor":
2400 elif f == "ancestor":
2203 w = 1 * smallbonus
2401 w = 1 * smallbonus
2204 elif f in "reverse limit first _intlist":
2402 elif f in "reverse limit first _intlist":
2205 w = 0
2403 w = 0
2206 elif f in "sort":
2404 elif f in "sort":
2207 w = 10 # assume most sorts look at changelog
2405 w = 10 # assume most sorts look at changelog
2208 else:
2406 else:
2209 w = 1
2407 w = 1
2210 return w + wa, (op, x[1], ta)
2408 return w + wa, (op, x[1], ta)
2211 return 1, x
2409 return 1, x
2212
2410
2213 def optimize(tree):
2411 def optimize(tree):
2214 _weight, newtree = _optimize(tree, small=True)
2412 _weight, newtree = _optimize(tree, small=True)
2215 return newtree
2413 return newtree
2216
2414
2217 # the set of valid characters for the initial letter of symbols in
2415 # the set of valid characters for the initial letter of symbols in
2218 # alias declarations and definitions
2416 # alias declarations and definitions
2219 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2417 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2220 if c.isalnum() or c in '._@$' or ord(c) > 127)
2418 if c.isalnum() or c in '._@$' or ord(c) > 127)
2221
2419
def _parsewith(spec, lookup=None, syminitletters=None):
    """Generate a parse tree of given spec with given tokenizing options

    >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
    ('func', ('symbol', 'foo'), ('symbol', '$1'))
    >>> _parsewith('$1')
    Traceback (most recent call last):
      ...
    ParseError: ("syntax error in revset '$1'", 0)
    >>> _parsewith('foo bar')
    Traceback (most recent call last):
      ...
    ParseError: ('invalid token', 4)
    """
    p = parser.parser(elements)
    tree, pos = p.parse(tokenize(spec, lookup=lookup,
                                 syminitletters=syminitletters))
    # the tokenizer must have consumed the entire spec; any leftover
    # input means there was an unrecognized token
    if pos != len(spec):
        raise error.ParseError(_('invalid token'), pos)
    return parser.simplifyinfixops(tree, ('list', 'or'))
2242
2440
class _aliasrules(parser.basealiasrules):
    """Parsing and expansion rule set of revset aliases"""
    # section name used in error/warning messages
    _section = _('revset alias')

    @staticmethod
    def _parse(spec):
        """Parse alias declaration/definition ``spec``

        This allows symbol names to use also ``$`` as an initial letter
        (for backward compatibility), and callers of this function should
        examine whether ``$`` is used also for unexpected symbols or not.
        """
        return _parsewith(spec, syminitletters=_aliassyminitletters)

    @staticmethod
    def _trygetfunc(tree):
        # Return (name, arglist) if ``tree`` is a function call on a plain
        # symbol; implicitly return None for any other tree shape.
        if tree[0] == 'func' and tree[1][0] == 'symbol':
            return tree[1][1], getlist(tree[2])
2261
2459
def expandaliases(ui, tree, showwarning=None):
    """Expand ``[revsetalias]`` config aliases inside ``tree``.

    When ``showwarning`` is provided, it is called once for every broken
    alias definition, whether or not the alias was actually referenced.
    """
    aliasmap = _aliasrules.buildmap(ui.configitems('revsetalias'))
    expanded = _aliasrules.expand(aliasmap, tree)
    if showwarning:
        # warn about problematic (but not referred) aliases
        for _name, alias in sorted(aliasmap.iteritems()):
            if alias.error and not alias.warned:
                showwarning(_('warning: %s\n') % (alias.error))
                alias.warned = True
    return expanded
2272
2470
def foldconcat(tree):
    """Fold elements to be concatenated by `##`
    """
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return tree
    if tree[0] == '_concat':
        # flatten nested _concat nodes left-to-right with an explicit stack
        chunks = []
        stack = [tree]
        while stack:
            node = stack.pop()
            if node[0] == '_concat':
                stack.extend(reversed(node[1:]))
            elif node[0] in ('string', 'symbol'):
                chunks.append(node[1])
            else:
                msg = _("\"##\" can't concatenate \"%s\" element") % (node[0])
                raise error.ParseError(msg)
        return ('string', ''.join(chunks))
    # not a concatenation node: recurse into children
    return tuple(foldconcat(subtree) for subtree in tree)
2293
2491
def parse(spec, lookup=None):
    """Parse a revset ``spec`` into a tree using default tokenizing options."""
    return _parsewith(spec, lookup=lookup)
2296
2494
def posttreebuilthook(tree, repo):
    """Hook point invoked after the revset tree has been fully built.

    The default implementation does nothing; extensions can wrap this to
    run code on the optimized tree.
    """
    # hook for extensions to execute code on the optimized tree
    pass
2300
2498
def match(ui, spec, repo=None):
    """Create a matcher for a single revision spec.

    Raises ParseError for an empty spec; a repo, when given, provides
    symbol lookup during tokenization.
    """
    if not spec:
        raise error.ParseError(_("empty query"))
    lookup = repo.__contains__ if repo else None
    return _makematcher(ui, parse(spec, lookup), repo)
2309
2507
def matchany(ui, specs, repo=None):
    """Create a matcher that will include any revisions matching one of the
    given specs"""
    if not specs:
        # no specs at all: matcher that always yields the empty set
        def mfunc(repo, subset=None):
            return baseset()
        return mfunc
    if not all(specs):
        raise error.ParseError(_("empty query"))
    lookup = repo.__contains__ if repo else None
    if len(specs) == 1:
        tree = parse(specs[0], lookup)
    else:
        # combine multiple specs with a top-level 'or' node
        tree = ('or',) + tuple(parse(s, lookup) for s in specs)
    return _makematcher(ui, tree, repo)
2327
2525
def _makematcher(ui, tree, repo):
    """Build a matcher callable from a parsed revset tree.

    Expands aliases (when a ui is available), folds ``##`` concatenation,
    optimizes the tree, then wraps it in a function evaluating the tree
    against a subset of revisions.
    """
    if ui:
        tree = expandaliases(ui, tree, showwarning=ui.warn)
    tree = foldconcat(tree)
    tree = optimize(tree)
    posttreebuilthook(tree, repo)
    def mfunc(repo, subset=None):
        # default to the whole repository when no subset is given
        if subset is None:
            subset = fullreposet(repo)
        # wrap plain collections in a baseset so getset always receives
        # a smartset (detected via the isascending attribute)
        if util.safehasattr(subset, 'isascending'):
            result = getset(repo, subset, tree)
        else:
            result = getset(repo, baseset(subset), tree)
        return result
    return mfunc
2343
2541
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    "_list('') or 1"
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    '''

    def quote(s):
        # single-quote and escape via Python's repr of the str value
        return repr(str(s))

    def argtype(c, arg):
        # render one argument according to its format character; implicitly
        # returns None for unknown characters (callers pass validated ones)
        if c == 'd':
            return str(int(arg))
        elif c == 's':
            return quote(arg)
        elif c == 'r':
            parse(arg) # make sure syntax errors are confined
            return '(%s)' % arg
        elif c == 'n':
            return quote(node.hex(arg))
        elif c == 'b':
            return quote(arg.branch())

    def listexp(s, t):
        # render a list argument of element type ``t``; long lists are
        # NUL-joined into internal _list/_intlist/_hexlist helpers, and
        # unsupported types fall through to a balanced 'or' split
        l = len(s)
        if l == 0:
            return "_list('')"
        elif l == 1:
            return argtype(t, s[0])
        elif t == 'd':
            return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
        elif t == 's':
            return "_list('%s')" % "\0".join(s)
        elif t == 'n':
            return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
        elif t == 'b':
            return "_list('%s')" % "\0".join(a.branch() for a in s)

        m = l // 2
        return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))

    # scan the expression, copying literal characters and substituting
    # %-escapes from ``args`` in order
    ret = ''
    pos = 0
    arg = 0
    while pos < len(expr):
        c = expr[pos]
        if c == '%':
            pos += 1
            d = expr[pos]
            if d == '%':
                ret += d
            elif d in 'dsnbr':
                ret += argtype(d, args[arg])
                arg += 1
            elif d == 'l':
                # a list of some type
                pos += 1
                d = expr[pos]
                ret += listexp(list(args[arg]), d)
                arg += 1
            else:
                raise error.Abort('unexpected revspec format character %s' % d)
        else:
            ret += c
        pos += 1

    return ret
2437
2635
def prettyformat(tree):
    """Return a human-readable, indented rendering of a parsed revset tree."""
    return parser.prettyformat(tree, ('string', 'symbol'))
2440
2638
def depth(tree):
    """Return the nesting depth of a parsed tree (0 for non-tuple leaves)."""
    if not isinstance(tree, tuple):
        return 0
    return 1 + max(depth(subtree) for subtree in tree)
2446
2644
def funcsused(tree):
    """Return the set of function names referenced anywhere in ``tree``."""
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        # leaves reference no functions
        return set()
    found = set()
    for subtree in tree[1:]:
        found.update(funcsused(subtree))
    if tree[0] == 'func':
        found.add(tree[1][1])
    return found
2457
2655
2458 def _formatsetrepr(r):
2656 def _formatsetrepr(r):
2459 """Format an optional printable representation of a set
2657 """Format an optional printable representation of a set
2460
2658
2461 ======== =================================
2659 ======== =================================
2462 type(r) example
2660 type(r) example
2463 ======== =================================
2661 ======== =================================
2464 tuple ('<not %r>', other)
2662 tuple ('<not %r>', other)
2465 str '<branch closed>'
2663 str '<branch closed>'
2466 callable lambda: '<branch %r>' % sorted(b)
2664 callable lambda: '<branch %r>' % sorted(b)
2467 object other
2665 object other
2468 ======== =================================
2666 ======== =================================
2469 """
2667 """
2470 if r is None:
2668 if r is None:
2471 return ''
2669 return ''
2472 elif isinstance(r, tuple):
2670 elif isinstance(r, tuple):
2473 return r[0] % r[1:]
2671 return r[0] % r[1:]
2474 elif isinstance(r, str):
2672 elif isinstance(r, str):
2475 return r
2673 return r
2476 elif callable(r):
2674 elif callable(r):
2477 return r()
2675 return r()
2478 else:
2676 else:
2479 return repr(r)
2677 return repr(r)
2480
2678
class abstractsmartset(object):
    """Abstract base class for smartsets.

    Concrete subclasses implement membership, iteration and ordering; the
    set-operation helpers (&, +, -, filter) are provided here.
    """

    def __nonzero__(self):
        """True if the smartset is not empty"""
        raise NotImplementedError()

    def __contains__(self, rev):
        """provide fast membership testing"""
        raise NotImplementedError()

    def __iter__(self):
        """iterate the set in the order it is supposed to be iterated"""
        raise NotImplementedError()

    # Attributes containing a function to perform a fast iteration in a given
    # direction. A smartset can have none, one, or both defined.
    #
    # Default value is None instead of a function returning None to avoid
    # initializing an iterator just for testing if a fast method exists.
    fastasc = None
    fastdesc = None

    def isascending(self):
        """True if the set will iterate in ascending order"""
        raise NotImplementedError()

    def isdescending(self):
        """True if the set will iterate in descending order"""
        raise NotImplementedError()

    def istopo(self):
        """True if the set will iterate in topographical order"""
        raise NotImplementedError()

    @util.cachefunc
    def min(self):
        """return the minimum element in the set"""
        if self.fastasc is not None:
            # first element of an ascending iteration is the minimum
            for r in self.fastasc():
                return r
            raise ValueError('arg is an empty sequence')
        return min(self)

    @util.cachefunc
    def max(self):
        """return the maximum element in the set"""
        if self.fastdesc is not None:
            # first element of a descending iteration is the maximum
            for r in self.fastdesc():
                return r
            raise ValueError('arg is an empty sequence')
        return max(self)

    def first(self):
        """return the first element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def last(self):
        """return the last element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def __len__(self):
        """return the length of the smartsets

        This can be expensive on smartset that could be lazy otherwise."""
        raise NotImplementedError()

    def reverse(self):
        """reverse the expected iteration order"""
        raise NotImplementedError()

    def sort(self, reverse=True):
        """get the set to iterate in an ascending or descending order"""
        raise NotImplementedError()

    def __and__(self, other):
        """Returns a new object with the intersection of the two collections.

        This is part of the mandatory API for smartset."""
        # intersecting with the full repo is a no-op
        if isinstance(other, fullreposet):
            return self
        return self.filter(other.__contains__, condrepr=other, cache=False)

    def __add__(self, other):
        """Returns a new object with the union of the two collections.

        This is part of the mandatory API for smartset."""
        return addset(self, other)

    def __sub__(self, other):
        """Returns a new object with the substraction of the two collections.

        This is part of the mandatory API for smartset."""
        c = other.__contains__
        return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
                           cache=False)

    def filter(self, condition, condrepr=None, cache=True):
        """Returns this smartset filtered by condition as a new smartset.

        `condition` is a callable which takes a revision number and returns a
        boolean. Optional `condrepr` provides a printable representation of
        the given `condition`.

        This is part of the mandatory API for smartset."""
        # builtin cannot be cached. but do not needs to
        if cache and util.safehasattr(condition, 'func_code'):
            condition = util.cachefunc(condition)
        return filteredset(self, condition, condrepr)
2593
2791
class baseset(abstractsmartset):
    """Basic data structure that represents a revset and contains the basic
    operation that it should be able to perform.

    Every method in this class should be implemented by any smartset class.
    """
    def __init__(self, data=(), datarepr=None, istopo=False):
        """
        datarepr: a tuple of (format, obj, ...), a function or an object that
                  provides a printable representation of the given data.
        """
        # _ascending is tri-state: None = keep insertion order,
        # True/False = iterate ascending/descending
        self._ascending = None
        self._istopo = istopo
        if not isinstance(data, list):
            if isinstance(data, set):
                self._set = data
                # set has no order we pick one for stability purpose
                self._ascending = True
            data = list(data)
        self._list = data
        self._datarepr = datarepr

    @util.propertycache
    def _set(self):
        # lazily built set for O(1) membership tests
        return set(self._list)

    @util.propertycache
    def _asclist(self):
        # lazily built ascending-sorted copy of the data
        asclist = self._list[:]
        asclist.sort()
        return asclist

    def __iter__(self):
        if self._ascending is None:
            return iter(self._list)
        elif self._ascending:
            return iter(self._asclist)
        else:
            return reversed(self._asclist)

    def fastasc(self):
        return iter(self._asclist)

    def fastdesc(self):
        return reversed(self._asclist)

    @util.propertycache
    def __contains__(self):
        # bind directly to the set's __contains__ for speed
        return self._set.__contains__

    def __nonzero__(self):
        return bool(self._list)

    def sort(self, reverse=False):
        self._ascending = not bool(reverse)
        # sorting discards any topological ordering
        self._istopo = False

    def reverse(self):
        if self._ascending is None:
            # unordered: reverse the raw list in place
            self._list.reverse()
        else:
            self._ascending = not self._ascending
        self._istopo = False

    def __len__(self):
        return len(self._list)

    def isascending(self):
        """Returns True if the collection is ascending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and self._ascending

    def isdescending(self):
        """Returns True if the collection is descending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and not self._ascending

    def istopo(self):
        """Is the collection is in topographical order or not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._istopo

    def first(self):
        if self:
            if self._ascending is None:
                return self._list[0]
            elif self._ascending:
                return self._asclist[0]
            else:
                return self._asclist[-1]
        return None

    def last(self):
        if self:
            if self._ascending is None:
                return self._list[-1]
            elif self._ascending:
                return self._asclist[-1]
            else:
                return self._asclist[0]
        return None

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        s = _formatsetrepr(self._datarepr)
        if not s:
            l = self._list
            # if _list has been built from a set, it might have a different
            # order from one python implementation to another.
            # We fallback to the sorted version for a stable output.
            if self._ascending is not None:
                l = self._asclist
            s = repr(l)
        return '<%s%s %s>' % (type(self).__name__, d, s)
2717
2915
2718 class filteredset(abstractsmartset):
2916 class filteredset(abstractsmartset):
2719 """Duck type for baseset class which iterates lazily over the revisions in
2917 """Duck type for baseset class which iterates lazily over the revisions in
2720 the subset and contains a function which tests for membership in the
2918 the subset and contains a function which tests for membership in the
2721 revset
2919 revset
2722 """
2920 """
    def __init__(self, subset, condition=lambda x: True, condrepr=None):
        """
        condition: a function that decide whether a revision in the subset
                   belongs to the revset or not.
        condrepr: a tuple of (format, obj, ...), a function or an object that
                  provides a printable representation of the given condition.
        """
        self._subset = subset
        self._condition = condition
        self._condrepr = condrepr
2733
2931
    def __contains__(self, x):
        # a revision belongs here only if it is in the subset AND passes
        # the filtering condition
        return x in self._subset and self._condition(x)

    def __iter__(self):
        # lazily yield the subset's revisions that match the condition
        return self._iterfilter(self._subset)
2739
2937
2740 def _iterfilter(self, it):
2938 def _iterfilter(self, it):
2741 cond = self._condition
2939 cond = self._condition
2742 for x in it:
2940 for x in it:
2743 if cond(x):
2941 if cond(x):
2744 yield x
2942 yield x
2745
2943
    @property
    def fastasc(self):
        # expose a fast ascending iterator only when the subset has one;
        # wrap it so results still pass through the filter condition
        it = self._subset.fastasc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    @property
    def fastdesc(self):
        # same as fastasc, but for descending iteration
        it = self._subset.fastdesc
        if it is None:
            return None
        return lambda: self._iterfilter(it())
2759
2957
2760 def __nonzero__(self):
2958 def __nonzero__(self):
2761 fast = None
2959 fast = None
2762 candidates = [self.fastasc if self.isascending() else None,
2960 candidates = [self.fastasc if self.isascending() else None,
2763 self.fastdesc if self.isdescending() else None,
2961 self.fastdesc if self.isdescending() else None,
2764 self.fastasc,
2962 self.fastasc,
2765 self.fastdesc]
2963 self.fastdesc]
2766 for candidate in candidates:
2964 for candidate in candidates:
2767 if candidate is not None:
2965 if candidate is not None:
2768 fast = candidate
2966 fast = candidate
2769 break
2967 break
2770
2968
2771 if fast is not None:
2969 if fast is not None:
2772 it = fast()
2970 it = fast()
2773 else:
2971 else:
2774 it = self
2972 it = self
2775
2973
2776 for r in it:
2974 for r in it:
2777 return True
2975 return True
2778 return False
2976 return False
2779
2977
    def __len__(self):
        # Basic implementation to be changed in future patches.
        # until this gets improved, we use generator expression
        # here, since list compr is free to call __len__ again
        # causing infinite recursion
        l = baseset(r for r in self)
        return len(l)
2787
2985
2788 def sort(self, reverse=False):
2986 def sort(self, reverse=False):
2789 self._subset.sort(reverse=reverse)
2987 self._subset.sort(reverse=reverse)
2790
2988
2791 def reverse(self):
2989 def reverse(self):
2792 self._subset.reverse()
2990 self._subset.reverse()
2793
2991
2794 def isascending(self):
2992 def isascending(self):
2795 return self._subset.isascending()
2993 return self._subset.isascending()
2796
2994
2797 def isdescending(self):
2995 def isdescending(self):
2798 return self._subset.isdescending()
2996 return self._subset.isdescending()
2799
2997
2800 def istopo(self):
2998 def istopo(self):
2801 return self._subset.istopo()
2999 return self._subset.istopo()
2802
3000
2803 def first(self):
3001 def first(self):
2804 for x in self:
3002 for x in self:
2805 return x
3003 return x
2806 return None
3004 return None
2807
3005
2808 def last(self):
3006 def last(self):
2809 it = None
3007 it = None
2810 if self.isascending():
3008 if self.isascending():
2811 it = self.fastdesc
3009 it = self.fastdesc
2812 elif self.isdescending():
3010 elif self.isdescending():
2813 it = self.fastasc
3011 it = self.fastasc
2814 if it is not None:
3012 if it is not None:
2815 for x in it():
3013 for x in it():
2816 return x
3014 return x
2817 return None #empty case
3015 return None #empty case
2818 else:
3016 else:
2819 x = None
3017 x = None
2820 for x in self:
3018 for x in self:
2821 pass
3019 pass
2822 return x
3020 return x
2823
3021
2824 def __repr__(self):
3022 def __repr__(self):
2825 xs = [repr(self._subset)]
3023 xs = [repr(self._subset)]
2826 s = _formatsetrepr(self._condrepr)
3024 s = _formatsetrepr(self._condrepr)
2827 if s:
3025 if s:
2828 xs.append(s)
3026 xs.append(s)
2829 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3027 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
2830
3028
2831 def _iterordered(ascending, iter1, iter2):
3029 def _iterordered(ascending, iter1, iter2):
2832 """produce an ordered iteration from two iterators with the same order
3030 """produce an ordered iteration from two iterators with the same order
2833
3031
2834 The ascending is used to indicated the iteration direction.
3032 The ascending is used to indicated the iteration direction.
2835 """
3033 """
2836 choice = max
3034 choice = max
2837 if ascending:
3035 if ascending:
2838 choice = min
3036 choice = min
2839
3037
2840 val1 = None
3038 val1 = None
2841 val2 = None
3039 val2 = None
2842 try:
3040 try:
2843 # Consume both iterators in an ordered way until one is empty
3041 # Consume both iterators in an ordered way until one is empty
2844 while True:
3042 while True:
2845 if val1 is None:
3043 if val1 is None:
2846 val1 = next(iter1)
3044 val1 = next(iter1)
2847 if val2 is None:
3045 if val2 is None:
2848 val2 = next(iter2)
3046 val2 = next(iter2)
2849 n = choice(val1, val2)
3047 n = choice(val1, val2)
2850 yield n
3048 yield n
2851 if val1 == n:
3049 if val1 == n:
2852 val1 = None
3050 val1 = None
2853 if val2 == n:
3051 if val2 == n:
2854 val2 = None
3052 val2 = None
2855 except StopIteration:
3053 except StopIteration:
2856 # Flush any remaining values and consume the other one
3054 # Flush any remaining values and consume the other one
2857 it = iter2
3055 it = iter2
2858 if val1 is not None:
3056 if val1 is not None:
2859 yield val1
3057 yield val1
2860 it = iter1
3058 it = iter1
2861 elif val2 is not None:
3059 elif val2 is not None:
2862 # might have been equality and both are empty
3060 # might have been equality and both are empty
2863 yield val2
3061 yield val2
2864 for val in it:
3062 for val in it:
2865 yield val
3063 yield val
2866
3064
class addset(abstractsmartset):
    """Represent the addition of two sets

    Wrapper structure for lazily adding two structures without losing much
    performance on the __contains__ method

    If the ascending attribute is set, that means the two structures are
    ordered in either an ascending or descending way. Therefore, we can add
    them maintaining the order by iterating over both at the same time

    >>> xs = baseset([0, 3, 2])
    >>> ys = baseset([5, 2, 4])

    >>> rs = addset(xs, ys)
    >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
    (True, True, False, True, 0, 4)
    >>> rs = addset(xs, baseset([]))
    >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
    (True, True, False, 0, 2)
    >>> rs = addset(baseset([]), baseset([]))
    >>> bool(rs), 0 in rs, rs.first(), rs.last()
    (False, False, None, None)

    iterate unsorted:
    >>> rs = addset(xs, ys)
    >>> # (use generator because pypy could call len())
    >>> list(x for x in rs) # without _genlist
    [0, 3, 2, 5, 4]
    >>> assert not rs._genlist
    >>> len(rs)
    5
    >>> [x for x in rs] # with _genlist
    [0, 3, 2, 5, 4]
    >>> assert rs._genlist

    iterate ascending:
    >>> rs = addset(xs, ys, ascending=True)
    >>> # (use generator because pypy could call len())
    >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastasc()]
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert rs._asclist

    iterate descending:
    >>> rs = addset(xs, ys, ascending=False)
    >>> # (use generator because pypy could call len())
    >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastdesc()]
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert rs._asclist

    iterate ascending without fastasc:
    >>> rs = addset(xs, generatorset(ys), ascending=True)
    >>> assert rs.fastasc is None
    >>> [x for x in rs]
    [0, 2, 3, 4, 5]

    iterate descending without fastdesc:
    >>> rs = addset(generatorset(xs), ys, ascending=False)
    >>> assert rs.fastdesc is None
    >>> [x for x in rs]
    [5, 4, 3, 2, 0]
    """
    def __init__(self, revs1, revs2, ascending=None):
        self._r1 = revs1
        self._r2 = revs2
        self._iter = None
        self._ascending = ascending
        self._genlist = None   # cached baseset of all values (lazy)
        self._asclist = None   # cached ascending list (lazy)

    def __len__(self):
        return len(self._list)

    def __nonzero__(self):
        # non-empty as soon as either operand is non-empty
        return bool(self._r1) or bool(self._r2)

    @util.propertycache
    def _list(self):
        if not self._genlist:
            self._genlist = baseset(iter(self))
        return self._genlist

    def __iter__(self):
        """Iterate over both collections without repeating elements

        If the ascending attribute is not set, iterate over the first one and
        then over the second one checking for membership on the first one so
        we dont yield any duplicates.

        If the ascending attribute is set, iterate over both collections at
        the same time, yielding only one value at a time in the given order.
        """
        if self._ascending is None:
            if self._genlist:
                return iter(self._genlist)
            def arbitraryordergen():
                for r in self._r1:
                    yield r
                inr1 = self._r1.__contains__
                for r in self._r2:
                    if not inr1(r):
                        yield r
            return arbitraryordergen()
        # try to use our own fast iterator if it exists
        self._trysetasclist()
        attrname = 'fastasc' if self._ascending else 'fastdesc'
        fastiter = getattr(self, attrname)
        if fastiter is not None:
            return fastiter()
        # maybe only one of the two components supports fast iteration
        def ordered(revs):
            fast = getattr(revs, attrname)
            if fast is None:
                # sort a copy: avoid in-place side effects on the operand
                return iter(sorted(revs, reverse=not self._ascending))
            return fast()
        return _iterordered(self._ascending,
                            ordered(self._r1), ordered(self._r2))

    def _trysetasclist(self):
        """populate the _asclist attribute if possible and necessary"""
        if self._genlist is not None and self._asclist is None:
            self._asclist = sorted(self._genlist)

    @property
    def fastasc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__iter__
        fast1 = self._r1.fastasc
        fast2 = self._r2.fastasc
        if None in (fast1, fast2):
            return None
        return lambda: _iterordered(True, fast1(), fast2())

    @property
    def fastdesc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__reversed__
        fast1 = self._r1.fastdesc
        fast2 = self._r2.fastdesc
        if None in (fast1, fast2):
            return None
        return lambda: _iterordered(False, fast1(), fast2())

    def __contains__(self, x):
        return x in self._r1 or x in self._r2

    def sort(self, reverse=False):
        """Sort the added set

        For this we use the cached list with all the generated values and if
        we know they are ascending or descending we can sort them in a smart
        way.
        """
        self._ascending = not reverse

    def isascending(self):
        return self._ascending is not None and self._ascending

    def isdescending(self):
        return self._ascending is not None and not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topographical order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def reverse(self):
        if self._ascending is None:
            # unordered: reverse the materialized list in place
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        # flip direction, take the (new) first element, flip back
        self.reverse()
        val = self.first()
        self.reverse()
        return val

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3075
3273
class generatorset(abstractsmartset):
    """Wrap a generator for lazy iteration

    Wrapper structure for generators that provides lazy membership and can
    be iterated more than once.
    When asked for membership it generates values until either it finds the
    requested one or has gone through all the elements in the generator
    """
    def __init__(self, gen, iterasc=None):
        """
        gen: a generator producing the values for the generatorset.
        iterasc: None when the generator order is unknown; True/False when
                 it is known to be ascending/descending, which enables the
                 matching fast iterator and an early-exit __contains__.
        """
        self._gen = gen
        self._asclist = None   # sorted snapshot, built once gen is exhausted
        self._cache = {}       # membership cache: value -> bool
        self._genlist = []     # values produced so far, in generator order
        self._finished = False
        self._ascending = True
        if iterasc is not None:
            if iterasc:
                self.fastasc = self._iterator
                self.__contains__ = self._asccontains
            else:
                self.fastdesc = self._iterator
                self.__contains__ = self._desccontains

    def __nonzero__(self):
        # Do not use 'for r in self' because it will enforce the iteration
        # order (default ascending), possibly unrolling a whole descending
        # iterator.
        if self._genlist:
            return True
        for r in self._consumegen():
            return True
        return False

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def _asccontains(self, x):
        """version of contains optimised for ascending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l > x:
                # ascending: once past x, x can no longer appear
                break

        self._cache[x] = False
        return False

    def _desccontains(self, x):
        """version of contains optimised for descending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l < x:
                # descending: once below x, x can no longer appear
                break

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator
        for x in self._consumegen():
            pass
        # recall the same code: _consumegen has now set fastasc/fastdesc
        return iter(self)

    def _iterator(self):
        if self._finished:
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revision
        # removed from _consumegen and added to genlist in another instance.
        #
        # Getting rid of it would provide an about 15% speed up on this
        # iteration.
        genlist = self._genlist
        nextrev = self._consumegen().next
        _len = len # cache global lookup
        def gen():
            i = 0
            while True:
                if i < _len(genlist):
                    yield genlist[i]
                else:
                    yield nextrev()
                i += 1
        return gen()

    def _consumegen(self):
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        if not self._finished:
            self._finished = True
            # once exhausted, a sorted snapshot gives us both fast iterators
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__

    def __len__(self):
        for x in self._consumegen():
            pass
        return len(self._genlist)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the set is still in
        # topographical order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        return next(it(), None)

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again; after consumption
            # fastasc/fastdesc are set, so this recursion terminates.
            for x in self._consumegen():
                pass
            # fixed: this used to call self.first(), which returned the
            # wrong end of the set on the slow path
            return self.last()
        return next(it(), None)

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s>' % (type(self).__name__, d)
3255
3453
class spanset(abstractsmartset):
    """Duck type for baseset class which represents a range of revisions and
    can work lazily and without having all the range in memory

    Note that spanset(x, y) behave almost like xrange(x, y) except for two
    notable points:
    - when x > y the set iterates in descending order,
    - revisions filtered by this repoview will be skipped.

    """
    def __init__(self, repo, start=0, end=None):
        """
        start: first revision included the set
               (default to 0)
        end: first revision excluded (last+1)
             (default to len(repo))

        Spanset will be descending if `end` < `start`.
        """
        if end is None:
            end = len(repo)
        self._ascending = start <= end
        if not self._ascending:
            # normalize to start <= end; direction is kept in _ascending
            start, end = end + 1, start + 1
        self._start = start
        self._end = end
        self._hiddenrevs = repo.changelog.filteredrevs

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topographical order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def _iterfilter(self, iterrange):
        """yield revisions from *iterrange* that are not hidden"""
        hidden = self._hiddenrevs
        for rev in iterrange:
            if rev not in hidden:
                yield rev

    def __iter__(self):
        if self._ascending:
            return self.fastasc()
        return self.fastdesc()

    def fastasc(self):
        span = xrange(self._start, self._end)
        if self._hiddenrevs:
            return self._iterfilter(span)
        return iter(span)

    def fastdesc(self):
        span = xrange(self._end - 1, self._start - 1, -1)
        if self._hiddenrevs:
            return self._iterfilter(span)
        return iter(span)

    def __contains__(self, rev):
        hidden = self._hiddenrevs
        return ((self._start <= rev < self._end)
                and not (hidden and rev in hidden))

    def __nonzero__(self):
        for r in self:
            return True
        return False

    def __len__(self):
        if not self._hiddenrevs:
            return abs(self._end - self._start)
        # subtract the hidden revisions falling inside the span
        # NOTE(review): __init__ normalizes to start <= end, so the
        # `end < rev <= start` clause looks unreachable — confirm
        count = 0
        start = self._start
        end = self._end
        for rev in self._hiddenrevs:
            if (end < rev <= start) or (start <= rev < end):
                count += 1
        return abs(self._end - self._start) - count

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        for rev in it():
            return rev
        return None

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        for rev in it():
            return rev
        return None

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s %d:%d>' % (type(self).__name__, d,
                                 self._start, self._end - 1)
3370
3568
class fullreposet(spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization and magic to handle virtual
    revisions such as "null".
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, all of the other set should also be
        in self. Therefore `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        if not util.safehasattr(other, 'isascending'):
            # `other` is not a smartset; make it so. Since it was combined
            # with "&", assume it is at least a set-like object, and filter
            # out hidden revisions (this boldly assumes all smartsets are
            # pure, i.e. already free of hidden revisions).
            other = baseset(other - self._hiddenrevs)

        # XXX As fullreposet is also used as bootstrap, this is wrong.
        #
        # With a giveme312() revset returning [3,1,2], this makes
        #   'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
        # We cannot just drop it because other usage still need to sort it:
        #   'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
        #
        # There is also some faulty revset implementations that rely on it
        # (eg: children as of its state in e8075329c5fb)
        #
        # When we fix the two points above we can move this into the if clause
        other.sort(reverse=self.isdescending())
        return other
3409
3607
def prettyformatset(revs):
    """Render a smartset repr as a multi-line, depth-indented outline.

    Each '<' in the repr opens a nested smartset; the nesting depth at a
    given point (open brackets minus closed ones) determines how far the
    corresponding segment is indented.
    """
    text = repr(revs)
    size = len(text)
    formatted = []
    pos = 0
    while pos < size:
        nxt = text.find('<', pos + 1)
        if nxt < 0:
            nxt = size
        depth = text.count('<', 0, pos) - text.count('>', 0, pos)
        assert depth >= 0
        formatted.append(' ' * depth + text[pos:nxt].rstrip())
        pos = nxt
    return '\n'.join(formatted)
3423
3621
def loadpredicate(ui, extname, registrarobj):
    """Load revset predicates from specified registrarobj

    Registers every predicate from the registrar's table into the module
    level ``symbols`` dict, and records the ones marked safe (usable in
    untrusted contexts such as hgweb) in ``safesymbols``.
    """
    for predname, predfunc in registrarobj._table.iteritems():
        symbols[predname] = predfunc
        if predfunc._safe:
            safesymbols.add(predname)
3431
3629
# Register the built-in predicates up front so that safesymbols is fully
# populated for this module.
loadpredicate(None, None, predicate)

# Expose the predicate functions so hggettext extracts their docstrings.
i18nfunctions = symbols.values()
General Comments 0
You need to be logged in to leave comments. Login now