##// END OF EJS Templates
revset: add new topographical sort...
Martijn Pieters -
r29348:2188f170 default
parent child Browse files
Show More
@@ -1,482 +1,472
1 1 # Revision graph generator for Mercurial
2 2 #
3 3 # Copyright 2008 Dirkjan Ochtman <dirkjan@ochtman.nl>
4 4 # Copyright 2007 Joel Rosdahl <joel@rosdahl.net>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """supports walking the history as DAGs suitable for graphical output
10 10
11 11 The most basic format we use is that of::
12 12
13 13 (id, type, data, [parentids])
14 14
15 15 The node and parent ids are arbitrary integers which identify a node in the
16 16 context of the graph returned. Type is a constant specifying the node type.
17 17 Data depends on type.
18 18 """
19 19
20 20 from __future__ import absolute_import
21 21
22 22 from .node import nullrev
23 23 from . import (
24 24 revset,
25 25 util,
26 26 )
27 27
28 28 CHANGESET = 'C'
29 29 PARENT = 'P'
30 30 GRANDPARENT = 'G'
31 31 MISSINGPARENT = 'M'
32 32 # Style of line to draw. None signals a line that ends and is removed at this
33 33 # point. A number prefix means only the last N characters of the current block
34 34 # will use that style, the rest will use the PARENT style. Add a - sign
35 35 # (so making N negative) and all but the first N characters use that style.
36 36 EDGES = {PARENT: '|', GRANDPARENT: ':', MISSINGPARENT: None}
37 37
def dagwalker(repo, revs):
    """cset DAG generator yielding (id, CHANGESET, ctx, [parentinfo]) tuples

    This generator function walks through revisions (which should be ordered
    from bigger to lower). It returns a tuple for each node.

    Each parentinfo entry is a tuple with (edgetype, parentid), where edgetype
    is one of PARENT, GRANDPARENT or MISSINGPARENT. The node and parent ids
    are arbitrary integers which identify a node in the context of the graph
    returned.

    """
    if not revs:
        return

    # cache of closest-displayed-ancestor sets, keyed by the off-set parent
    # rev, so the expensive reachableroots() query runs once per parent
    gpcache = {}

    for rev in revs:
        ctx = repo[rev]
        # partition into parents in the rev set and missing parents, then
        # augment the lists with markers, to inform graph drawing code about
        # what kind of edge to draw between nodes.
        pset = set(p.rev() for p in ctx.parents() if p.rev() in revs)
        mpars = [p.rev() for p in ctx.parents()
                 if p.rev() != nullrev and p.rev() not in pset]
        parents = [(PARENT, p) for p in sorted(pset)]

        for mpar in mpars:
            gp = gpcache.get(mpar)
            if gp is None:
                # precompute slow query as we know reachableroots() goes
                # through all revs (issue4782)
                if not isinstance(revs, revset.baseset):
                    revs = revset.baseset(revs)
                gp = gpcache[mpar] = sorted(set(revset.reachableroots(
                    repo, revs, [mpar])))
            if not gp:
                # no displayed ancestor at all: dangling parent edge
                parents.append((MISSINGPARENT, mpar))
                pset.add(mpar)
            else:
                # dotted edges to the closest displayed ancestors instead
                parents.extend((GRANDPARENT, g) for g in gp if g not in pset)
                pset.update(gp)

        yield (ctx.rev(), CHANGESET, ctx, parents)
92 82
def nodes(repo, nodes):
    """cset DAG generator yielding (id, CHANGESET, ctx, [parentids]) tuples

    This generator function walks the given nodes. It only returns parents
    that are in nodes, too.
    """
    include = set(nodes)
    for node in nodes:
        ctx = repo[node]
        # keep only parent edges that stay inside the requested node set
        parentset = {(PARENT, p.rev())
                     for p in ctx.parents() if p.node() in include}
        yield (ctx.rev(), CHANGESET, ctx, sorted(parentset))
105 95
def colored(dag, repo):
    """annotates a DAG with colored edge information

    For each DAG node this function emits tuples::

        (id, type, data, (col, color), [(col, nextcol, color)])

    with the following new elements:

      - Tuple (col, color) with column and color index for the current node
      - A list of tuples indicating the edges between the current node and its
        parents.
    """
    seen = []
    colors = {}
    newcolor = 1
    config = {}

    # read per-branch overrides from the [graph] config section, e.g.
    # 'graph.default.color' or 'graph.stable.width'
    for key, val in repo.ui.configitems('graph'):
        if '.' in key:
            branch, setting = key.rsplit('.', 1)
            # Validation
            if setting == "width" and val.isdigit():
                config.setdefault(branch, {})[setting] = int(val)
            elif setting == "color" and val.isalnum():
                config.setdefault(branch, {})[setting] = val

    if config:
        getconf = util.lrucachefunc(
            lambda rev: config.get(repo[rev].branch(), {}))
    else:
        getconf = lambda rev: {}

    for (cur, type, data, parents) in dag:

        # Compute seen and next
        if cur not in seen:
            seen.append(cur) # new head
            colors[cur] = newcolor
            newcolor += 1

        col = seen.index(cur)
        color = colors.pop(cur)
        next = seen[:]

        # Add parents to next
        addparents = [p for pt, p in parents if p not in next]
        next[col:col + 1] = addparents

        # Set colors for the parents
        for i, p in enumerate(addparents):
            if not i:
                # first parent inherits the current node's color
                colors[p] = color
            else:
                colors[p] = newcolor
                newcolor += 1

        # Add edges to the graph
        edges = []
        for ecol, eid in enumerate(seen):
            if eid in next:
                # ongoing edge that continues past this row
                bconf = getconf(eid)
                edges.append((
                    ecol, next.index(eid), colors[eid],
                    bconf.get('width', -1),
                    bconf.get('color', '')))
            elif eid == cur:
                # edges from the current node down to each of its parents
                for ptype, p in parents:
                    bconf = getconf(p)
                    edges.append((
                        ecol, next.index(p), color,
                        bconf.get('width', -1),
                        bconf.get('color', '')))

        # Yield and move on
        yield (cur, type, data, (col, color), edges)
        seen = next
183 173
def asciiedges(type, char, lines, state, rev, parents):
    """adds edge info to changelog DAG walk suitable for ascii()

    Yields one or more (type, char, lines, coldata) tuples; more than one
    is produced when a node has over two parents, because ascii() can only
    grow or shrink the column count by one per call.
    """
    seen = state['seen']
    if rev not in seen:
        seen.append(rev)
    nodeidx = seen.index(rev)

    # split parents into those already occupying a column (known) and new ones
    knownparents = []
    newparents = []
    for ptype, parent in parents:
        if parent in seen:
            knownparents.append(parent)
        else:
            newparents.append(parent)
            # remember which line style (|, :, or ending) this edge type uses
            state['edges'][parent] = state['styles'].get(ptype, '|')

    ncols = len(seen)
    nextseen = seen[:]
    nextseen[nodeidx:nodeidx + 1] = newparents
    edges = [(nodeidx, nextseen.index(p))
             for p in knownparents if p != nullrev]

    seen[:] = nextseen
    while len(newparents) > 2:
        # ascii() only knows how to add or remove a single column between two
        # calls. Nodes with more than two parents break this constraint so we
        # introduce intermediate expansion lines to grow the active node list
        # slowly.
        edges.append((nodeidx, nodeidx))
        edges.append((nodeidx, nodeidx + 1))
        nmorecols = 1
        yield (type, char, lines, (nodeidx, edges, ncols, nmorecols))
        # intermediate rows continue the edge with '\' and carry no log text
        char = '\\'
        lines = []
        nodeidx += 1
        ncols += 1
        edges = []
        del newparents[0]

    if len(newparents) > 0:
        edges.append((nodeidx, nodeidx))
    if len(newparents) > 1:
        edges.append((nodeidx, nodeidx + 1))
    nmorecols = len(nextseen) - ncols
    # remove current node from edge characters, no longer needed
    state['edges'].pop(rev, None)
    yield (type, char, lines, (nodeidx, edges, ncols, nmorecols))
231 221
232 222 def _fixlongrightedges(edges):
233 223 for (i, (start, end)) in enumerate(edges):
234 224 if end > start:
235 225 edges[i] = (start, end + 1)
236 226
237 227 def _getnodelineedgestail(
238 228 echars, idx, pidx, ncols, coldiff, pdiff, fix_tail):
239 229 if fix_tail and coldiff == pdiff and coldiff != 0:
240 230 # Still going in the same non-vertical direction.
241 231 if coldiff == -1:
242 232 start = max(idx + 1, pidx)
243 233 tail = echars[idx * 2:(start - 1) * 2]
244 234 tail.extend(["/", " "] * (ncols - start))
245 235 return tail
246 236 else:
247 237 return ["\\", " "] * (ncols - idx - 1)
248 238 else:
249 239 remainder = (ncols - idx - 1)
250 240 return echars[-(remainder * 2):] if remainder > 0 else []
251 241
252 242 def _drawedges(echars, edges, nodeline, interline):
253 243 for (start, end) in edges:
254 244 if start == end + 1:
255 245 interline[2 * end + 1] = "/"
256 246 elif start == end - 1:
257 247 interline[2 * start + 1] = "\\"
258 248 elif start == end:
259 249 interline[2 * start] = echars[2 * start]
260 250 else:
261 251 if 2 * end >= len(nodeline):
262 252 continue
263 253 nodeline[2 * end] = "+"
264 254 if start > end:
265 255 (start, end) = (end, start)
266 256 for i in range(2 * start + 1, 2 * end):
267 257 if nodeline[i] != "+":
268 258 nodeline[i] = "-"
269 259
270 260 def _getpaddingline(echars, idx, ncols, edges):
271 261 # all edges up to the current node
272 262 line = echars[:idx * 2]
273 263 # an edge for the current node, if there is one
274 264 if (idx, idx - 1) in edges or (idx, idx) in edges:
275 265 # (idx, idx - 1) (idx, idx)
276 266 # | | | | | | | |
277 267 # +---o | | o---+
278 268 # | | X | | X | |
279 269 # | |/ / | |/ /
280 270 # | | | | | |
281 271 line.extend(echars[idx * 2:(idx + 1) * 2])
282 272 else:
283 273 line.extend(' ')
284 274 # all edges to the right of the current node
285 275 remainder = ncols - idx - 1
286 276 if remainder > 0:
287 277 line.extend(echars[-(remainder * 2):])
288 278 return line
289 279
def _drawendinglines(lines, extra, edgemap, seen):
    """Draw ending lines for missing parent edges

    None indicates an edge that ends at between this node and the next
    Replace with a short line ending in ~ and add / lines to any edges to
    the right.

    """
    if None not in edgemap.values():
        return

    # Check for more edges to the right of our ending edges.
    # We need enough space to draw adjustment lines for these.
    edgechars = extra[::2]
    while edgechars and edgechars[-1] is None:
        edgechars.pop()
    shift_size = max((edgechars.count(None) * 2) - 1, 0)
    # each '~' needs at least 3 rows; shifted edges need one row per column
    while len(lines) < 3 + shift_size:
        lines.append(extra[:])

    if shift_size:
        empties = []
        toshift = []
        first_empty = extra.index(None)
        # classify columns right of the first ending edge: ended (None)
        # vs still-alive edges that must slide left to fill the gap
        for i, c in enumerate(extra[first_empty::2], first_empty // 2):
            if c is None:
                empties.append(i * 2)
            else:
                toshift.append(i * 2)
        # final resting columns for the surviving edges, packed leftwards
        targets = list(range(first_empty, first_empty + len(toshift) * 2, 2))
        positions = toshift[:]
        for line in lines[-shift_size:]:
            line[first_empty:] = [' '] * (len(line) - first_empty)
            # move each surviving edge one column left per row, drawing '/'
            # while it is still travelling and its own char once it lands
            for i in range(len(positions)):
                pos = positions[i] - 1
                positions[i] = max(pos, targets[i])
                line[pos] = '/' if pos > targets[i] else extra[toshift[i]]

    # rows of an ending edge: row 0 stays blank, row 1 draws '|', row 2 '~'
    map = {1: '|', 2: '~'}
    for i, line in enumerate(lines):
        if None not in line:
            continue
        line[:] = [c or map.get(i, ' ') for c in line]

    # remove edges that ended
    remove = [p for p, c in edgemap.items() if c is None]
    for parent in remove:
        del edgemap[parent]
        seen.remove(parent)
339 329
def asciistate():
    """returns the initial value for the "state" argument to ascii()"""
    state = {}
    state['seen'] = []          # revs currently occupying a column
    state['edges'] = {}         # parent rev -> edge drawing style
    state['lastcoldiff'] = 0    # column delta of the previous call
    state['lastindex'] = 0      # node column of the previous call
    state['styles'] = EDGES.copy()
    state['graphshorten'] = False
    return state
350 340
def ascii(ui, state, type, char, text, coldata):
    """prints an ASCII graph of the DAG

    takes the following arguments (one call per node in the graph):

      - ui to write to
      - Somewhere to keep the needed state in (init to asciistate())
      - Column of the current node in the set of ongoing edges.
      - Type indicator of node data, usually 'C' for changesets.
      - Payload: (char, lines):
        - Character to use as node's symbol.
        - List of lines to display as the node's text.
      - Edges; a list of (col, next_col) indicating the edges between
        the current node and its parents.
      - Number of columns (ongoing edges) in the current revision.
      - The difference between the number of columns (ongoing edges)
        in the next revision and the number of columns (ongoing edges)
        in the current revision. That is: -1 means one column removed;
        0 means no columns added or removed; 1 means one column added.
    """
    idx, edges, ncols, coldiff = coldata
    assert -2 < coldiff < 2

    edgemap, seen = state['edges'], state['seen']
    # Be tolerant of history issues; make sure we have at least ncols + coldiff
    # elements to work with. See test-glog.t for broken history test cases.
    echars = [c for p in seen for c in (edgemap.get(p, '|'), ' ')]
    echars.extend(('|', ' ') * max(ncols + coldiff - len(seen), 0))

    if coldiff == -1:
        # Transform
        #
        #     | | |        | | |
        #     o | |  into  o---+
        #     |X /         |/ /
        #     | |          | |
        _fixlongrightedges(edges)

    # add_padding_line says whether to rewrite
    #
    #     | | | |        | | | |
    #     | o---+  into  | o---+
    #     |  / /         |   | |  # <--- padding line
    #     o | |          |  / /
    #                    o | |
    add_padding_line = (len(text) > 2 and coldiff == -1 and
                        [x for (x, y) in edges if x + 1 < y])

    # fix_nodeline_tail says whether to rewrite
    #
    #     | | o | |        | | o | |
    #     | | |/ /         | | |/ /
    #     | o | |    into  | o / /   # <--- fixed nodeline tail
    #     | |/ /           | |/ /
    #     o | |            o | |
    fix_nodeline_tail = len(text) <= 2 and not add_padding_line

    # nodeline is the line containing the node character (typically o)
    nodeline = echars[:idx * 2]
    nodeline.extend([char, " "])

    nodeline.extend(
        _getnodelineedgestail(
            echars, idx, state['lastindex'], ncols, coldiff,
            state['lastcoldiff'], fix_nodeline_tail))

    # shift_interline is the line containing the non-vertical
    # edges between this entry and the next
    shift_interline = echars[:idx * 2]
    shift_interline.extend(' ' * (2 + coldiff))
    count = ncols - idx - 1
    if coldiff == -1:
        shift_interline.extend('/ ' * count)
    elif coldiff == 0:
        shift_interline.extend(echars[(idx + 1) * 2:ncols * 2])
    else:
        shift_interline.extend(r'\ ' * count)

    # draw edges from the current node to its parents
    _drawedges(echars, edges, nodeline, shift_interline)

    # lines is the list of all graph lines to print
    lines = [nodeline]
    if add_padding_line:
        lines.append(_getpaddingline(echars, idx, ncols, edges))

    # If 'graphshorten' config, only draw shift_interline
    # when there is any non vertical flow in graph.
    if state['graphshorten']:
        # NOTE(review): '\/' relies on Python keeping unrecognized escape
        # sequences literally (the two chars '\' and '/'); r'\/' would be
        # clearer and avoids a deprecation warning on newer Pythons
        if any(c in '\/' for c in shift_interline if c):
            lines.append(shift_interline)
    # Else, no 'graphshorten' config so draw shift_interline.
    else:
        lines.append(shift_interline)

    # make sure that there are as many graph lines as there are
    # log strings
    extra_interline = echars[:(ncols + coldiff) * 2]
    if len(lines) < len(text):
        while len(lines) < len(text):
            lines.append(extra_interline[:])

    _drawendinglines(lines, extra_interline, edgemap, seen)

    while len(text) < len(lines):
        text.append("")

    if any(len(char) > 1 for char in edgemap.values()):
        # limit drawing an edge to the first or last N lines of the current
        # section the rest of the edge is drawn like a parent line.
        parent = state['styles'][PARENT][-1]
        def _drawgp(char, i):
            # should a grandparent character be drawn for this line?
            if len(char) < 2:
                return True
            num = int(char[:-1])
            # either skip first num lines or take last num lines, based on sign
            return -num <= i if num < 0 else (len(lines) - i) <= num
        for i, line in enumerate(lines):
            line[:] = [c[-1] if _drawgp(c, i) else parent for c in line]
        edgemap.update(
            (e, (c if len(c) < 2 else parent)) for e, c in edgemap.items())

    # print lines
    indentation_level = max(ncols, ncols + coldiff)
    for (line, logstr) in zip(lines, text):
        ln = "%-*s %s" % (2 * indentation_level, "".join(line), logstr)
        ui.write(ln.rstrip() + '\n')

    # ... and start over
    state['lastcoldiff'] = coldiff
    state['lastindex'] = idx
@@ -1,3634 +1,3663
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11 import re
12 12
13 13 from .i18n import _
14 14 from . import (
15 15 destutil,
16 16 encoding,
17 17 error,
18 18 hbisect,
19 19 match as matchmod,
20 20 node,
21 21 obsolete as obsmod,
22 22 parser,
23 23 pathutil,
24 24 phases,
25 25 registrar,
26 26 repoview,
27 27 util,
28 28 )
29 29
def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst."""
    # cut == 1 restricts the parent walk to first parents only
    if followfirst:
        cut = 1
    else:
        cut = None
    cl = repo.changelog

    def iterate():
        # note: sorts the caller's revs in place
        revs.sort(reverse=True)
        irevs = iter(revs)
        # heap of negated revs, i.e. a max-heap yielding descending order
        h = []

        inputrev = next(irevs, None)
        if inputrev is not None:
            heapq.heappush(h, -inputrev)

        seen = set()
        while h:
            current = -heapq.heappop(h)
            # merge further input revs lazily as the frontier reaches them
            if current == inputrev:
                inputrev = next(irevs, None)
                if inputrev is not None:
                    heapq.heappush(h, -inputrev)
            if current not in seen:
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)
def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst."""
    # cut == 1 restricts the parent check to first parents only
    if followfirst:
        cut = 1
    else:
        cut = None

    def iterate():
        cl = repo.changelog
        # XXX this should be 'parentset.min()' assuming 'parentset' is a
        # smartset (and if it is not, it should.)
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            seen = set(revs)
            # single ascending pass: a rev is a descendant iff one of its
            # (possibly cut) parents has already been marked seen
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)
def _reachablerootspure(repo, minroot, roots, heads, includepath):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        # NOTE(review): returns a plain list here but a baseset below;
        # callers appear to wrap the result, but worth confirming
        return []
    parentrevs = repo.changelog.parentrevs
    roots = set(roots)
    visit = list(heads)
    reachable = set()
    seen = {}
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
            if not includepath:
                continue
        parents = parentrevs(rev)
        seen[rev] = parents
        # walk ancestors, pruned below minroot (no root can be further back)
        for parent in parents:
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    if not includepath:
        return reachable
    # second pass, ascending: pull in every rev on a path from a root
    # to a head (a rev is on a path iff one of its parents is reachable)
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return reachable
129 129
def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return baseset()
    minroot = roots.min()
    roots = list(roots)
    heads = list(heads)
    try:
        # fast path: C implementation on the changelog index
        revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
    except AttributeError:
        # pure-Python fallback when the index lacks reachableroots
        revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
    revs = baseset(revs)
    revs.sort()
    return revs
146 146
# operator table consumed by parser.parser: maps each token type to its
# binding strength and the parse actions for its possible positions
elements = {
    # token-type: binding-strength, primary, prefix, infix, suffix
    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
    "##": (20, None, None, ("_concat", 20), None),
    "~": (18, None, None, ("ancestor", 18), None),
    "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
    "-": (5, None, ("negate", 19), ("minus", 5), None),
    "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
    "not": (10, None, ("not", 10), None, None),
    "!": (10, None, ("not", 10), None, None),
    "and": (5, None, None, ("and", 5), None),
    "&": (5, None, None, ("and", 5), None),
    "%": (5, None, None, ("only", 5), ("onlypost", 5)),
    "or": (4, None, None, ("or", 4), None),
    "|": (4, None, None, ("or", 4), None),
    "+": (4, None, None, ("or", 4), None),
    "=": (3, None, None, ("keyvalue", 3), None),
    ",": (2, None, None, ("list", 2), None),
    ")": (0, None, None, None, None),
    "symbol": (0, "symbol", None, None, None),
    "string": (0, "string", None, None, None),
    "end": (0, None, None, None, None),
}

# words that are operators, not symbols, when encountered by the tokenizer
keywords = set(['and', 'or', 'not'])

# default set of valid characters for the initial letter of symbols
_syminitletters = set(c for c in [chr(i) for i in xrange(256)]
                      if c.isalnum() or c in '._@' or ord(c) > 127)

# default set of valid characters for non-initial letters of symbols
_symletters = set(c for c in [chr(i) for i in xrange(256)]
                  if c.isalnum() or c in '-._/@' or ord(c) > 127)
184 184
def tokenize(program, lookup=None, syminitletters=None, symletters=None):
    '''
    Parse a revset statement into a stream of tokens

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
        symletters = _symletters

    if program and lookup:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        parts = program.split(':', 1)
        if all(lookup(sym) for sym in parts if sym):
            if parts[0]:
                yield ('symbol', parts[0], 0)
            if len(parts) > 1:
                s = len(parts[0])
                yield (':', None, s)
                if parts[1]:
                    yield ('symbol', parts[1], s + 1)
            yield ('end', None, len(program))
            return

    # main scanner loop: one token (or skipped space) per iteration
    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():=,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            if c == 'r':
                # raw string: keep backslashes as-is
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = parser.unescapestr
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c in syminitletters:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if d not in symletters:
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                        s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            # back up one so the shared pos += 1 below lands correctly
            pos -= 1
        else:
            raise error.ParseError(_("syntax error in revset '%s'") %
                                   program, pos)
        pos += 1
    yield ('end', None, pos)
302 302
303 303 # helpers
304 304
def getstring(x, err):
    """Return the payload of a 'string' or 'symbol' node, else raise err."""
    if x and x[0] in ('string', 'symbol'):
        return x[1]
    raise error.ParseError(err)
309 309
def getlist(x):
    """Flatten a parse tree node into a list of argument nodes."""
    if not x:
        return []
    # a 'list' node carries its elements as the remaining tuple items
    return list(x[1:]) if x[0] == 'list' else [x]
316 316
def getargs(x, min, max, err):
    """Unpack x into a list of min..max arguments, else raise err.

    A negative max means "no upper bound".
    """
    args = getlist(x)
    if len(args) < min:
        raise error.ParseError(err)
    if 0 <= max < len(args):
        raise error.ParseError(err)
    return args
322 322
def getargsdict(x, funcname, keys):
    """Map positional/keyword call arguments onto the whitespace-separated
    names in ``keys``, returning a dict (funcname is used in errors)."""
    arglist = getlist(x)
    keynames = keys.split()
    return parser.buildargsdict(arglist, funcname, keynames,
                                keyvaluenode='keyvalue', keynode='symbol')
326 326
def getset(repo, subset, x):
    """Evaluate parsed tree 'x' within 'subset' and return a smartset."""
    if not x:
        raise error.ParseError(_("missing argument"))
    # dispatch on the node type ('func', 'and', 'symbol', ...)
    s = methods[x[0]](repo, subset, *x[1:])
    if util.safehasattr(s, 'isascending'):
        # already a smartset
        return s
    # else case should not happen, because all non-func are internal,
    # ignoring for now.
    if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
        repo.ui.deprecwarn('revset "%s" uses list instead of smartset'
                           % x[1][1],
                           '3.9')
    return baseset(s)
340 340
def _getrevsource(repo, r):
    """Return the rev that r was grafted/transplanted/rebased from, or None.

    Checks the extra fields written by those commands, in order, and
    resolves the first one that still names a revision in the repo.
    """
    extra = repo[r].extra()
    for label in ('source', 'transplant_source', 'rebase_source'):
        if label not in extra:
            continue
        try:
            return repo[extra[label]].rev()
        except error.RepoLookupError:
            pass
    return None
350 350
351 351 # operator methods
352 352
def stringset(repo, subset, x):
    """Resolve a literal revision identifier against subset."""
    x = repo[x].rev()
    included = x in subset
    if not included and x == node.nullrev:
        # nullrev is an implicit member of a fullreposet
        included = isinstance(subset, fullreposet)
    if included:
        return baseset([x])
    return baseset()
359 359
def rangeset(repo, subset, x, y):
    """Handle the 'x:y' operator: first rev of x through last rev of y."""
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    m, n = m.first(), n.last()

    if m == n:
        r = baseset([m])
    elif n == node.wdirrev:
        # ascending range ending at the working-directory pseudo-rev
        r = spanset(repo, m, len(repo)) + baseset([n])
    elif m == node.wdirrev:
        # descending range starting at the working-directory pseudo-rev
        r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
    elif m < n:
        r = spanset(repo, m, n + 1)
    else:
        # reversed range (m > n): iterate downwards
        r = spanset(repo, m, n - 1)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    #
    # This has performance implication, carrying the sorting over when possible
    # would be more efficient.
    return r & subset
384 384
def dagrange(repo, subset, x, y):
    """Handle 'x::y': revisions that are descendants of x and ancestors of y."""
    everything = fullreposet(repo)
    roots = getset(repo, everything, x)
    heads = getset(repo, everything, y)
    onpath = reachableroots(repo, roots, heads, includepath=True)
    return subset & onpath
390 390
def andset(repo, subset, x, y):
    """Intersection: evaluate y within the result of evaluating x."""
    left = getset(repo, subset, x)
    return getset(repo, left, y)
393 393
def differenceset(repo, subset, x, y):
    """Difference: revisions of x that are not in y (both within subset)."""
    left = getset(repo, subset, x)
    right = getset(repo, subset, y)
    return left - right
396 396
def orset(repo, subset, *xs):
    """Union of all xs, combined pairwise to keep the result tree balanced."""
    assert xs
    if len(xs) == 1:
        return getset(repo, subset, xs[0])
    mid = len(xs) // 2
    left = orset(repo, subset, *xs[:mid])
    right = orset(repo, subset, *xs[mid:])
    return left + right
405 405
def notset(repo, subset, x):
    """Complement: members of subset not matched by x."""
    excluded = getset(repo, subset, x)
    return subset - excluded
408 408
def listset(repo, subset, *xs):
    """Reject a bare 'x, y' list; lists are only valid as function arguments."""
    raise error.ParseError(_("can't use a list in this context"),
                           hint=_('see hg help "revsets.x or y"'))
412 412
def keyvaluepair(repo, subset, k, v):
    """Reject a bare 'key=value'; only valid inside a function call."""
    raise error.ParseError(_("can't use a key-value pair in this context"))
415 415
def func(repo, subset, a, b):
    """Dispatch a revset function call to its registered predicate."""
    if a[0] == 'symbol' and a[1] in symbols:
        return symbols[a[1]](repo, subset, b)

    # unknown function: suggest only predicates that carry a docstring
    # (undocumented ones are deliberately hidden)
    documented = [s for s, fn in symbols.items()
                  if getattr(fn, '__doc__', None) is not None]
    raise error.UnknownIdentifier(a[1], documented)
424 424
# functions

# symbols are callables like:
#   fn(repo, subset, x)
# with:
#   repo - current repository instance
#   subset - of revisions to be examined
#   x - argument in tree form
# name -> callable registry, populated via the @predicate decorator below
symbols = {}

# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
safesymbols = set()

# decorator that registers each predicate into symbols (and safesymbols)
predicate = registrar.revsetpredicate()
441 441
@predicate('_destupdate')
def _destupdate(repo, subset, x):
    # experimental revset for update destination
    # (no docstring on purpose: predicates without a docstring are not
    # advertised in error suggestions — see func())
    # Fix: the error-reporting name passed to getargsdict was 'limit',
    # left over from a copy/paste; report under this predicate's own name.
    args = getargsdict(x, '_destupdate', 'clean check')
    return subset & baseset([destutil.destupdate(repo, **args)[0]])
447 447
@predicate('_destmerge')
def _destmerge(repo, subset, x):
    # experimental revset for merge destination
    sourceset = None if x is None else getset(repo, fullreposet(repo), x)
    dest = destutil.destmerge(repo, sourceset=sourceset)
    return subset & baseset([dest])
455 455
@predicate('adds(pattern)', safe=True)
def adds(repo, subset, x):
    """Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
    # status field 1 selects the 'added' list of repo.status()
    # (modifies() passes 0 for the 'modified' list)
    return checkstatus(repo, subset, pat, 1)
467 467
@predicate('ancestor(*changeset)', safe=True)
def ancestor(repo, subset, x):
    """A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    args = getlist(x)
    everything = fullreposet(repo)
    anc = None

    # fold every revision of every argument set into a running GCA
    for arg in args:
        for r in getset(repo, everything, arg):
            anc = repo[r] if anc is None else anc.ancestor(repo[r])

    if anc is None or anc.rev() not in subset:
        return baseset()
    return baseset([anc.rev()])
492 492
def _ancestors(repo, subset, x, followfirst=False):
    """Shared implementation of ancestors() and _firstancestors()."""
    heads = getset(repo, fullreposet(repo), x)
    if heads:
        return subset & _revancestors(repo, heads, followfirst)
    return baseset()
499 499
@predicate('ancestors(set)', safe=True)
def ancestors(repo, subset, x):
    """Changesets that are ancestors of a changeset in set.
    """
    # thin wrapper; the work happens in _ancestors (shared with
    # _firstancestors)
    return _ancestors(repo, subset, x)
505 505
@predicate('_firstancestors', safe=True)
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    # (internal predicate: no docstring so it is not advertised)
    return _ancestors(repo, subset, x, followfirst=True)
511 511
def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        # walk n steps along first parents; xrange for consistency with the
        # rest of this module (avoids building a throwaway list on Python 2)
        for i in xrange(n):
            r = cl.parentrevs(r)[0]
        ps.add(r)
    return subset & ps
528 528
@predicate('author(string)', safe=True)
def author(repo, subset, x):
    """Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    # case-insensitive match against the committer string; the matcher comes
    # from _substringmatcher (substring semantics, presumably — see helper)
    n = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(n)
    return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())),
                         condrepr=('<user %r>', n))
538 538
@predicate('bisect(string)', safe=True)
def bisect(repo, subset, x):
    """Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads`` : csets topologically good/bad
    - ``range`` : csets taking part in the bisection
    - ``pruned`` : csets that are goods, bads or skipped
    - ``untested`` : csets whose fate is yet unknown
    - ``ignored`` : csets ignored due to DAG topology
    - ``current`` : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    # hbisect.get resolves the status name to the matching revisions
    state = set(hbisect.get(repo, status))
    return subset & state
555 555
# Backward-compatibility
# - no help entry so that we do not advertise it any more
@predicate('bisected', safe=True)
def bisected(repo, subset, x):
    # deprecated alias, kept so old queries keep working
    return bisect(repo, subset, x)
561 561
@predicate('bookmark([name])', safe=True)
def bookmark(repo, subset, x):
    """The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = util.stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            # exact name: a single dict lookup suffices
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % pattern)
            bms.add(repo[bmrev].rev())
        else:
            # pattern kind (e.g. re:): scan every bookmark name
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        # no argument: every bookmarked revision
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    # drop the null revision if any bookmark resolved to it
    bms -= set([node.nullrev])
    return subset & bms
599 599
@predicate('branch(string or set)', safe=True)
def branch(repo, subset, x):
    """
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = util.stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists and pattern kind is not specified explicitly
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]),
                                     condrepr=('<branch %r>', b))
            if b.startswith('literal:'):
                raise error.RepoLookupError(_("branch '%s' does not exist")
                                            % pattern)
        else:
            return subset.filter(lambda r: matcher(getbi(r)[0]),
                                 condrepr=('<branch %r>', b))

    # revspec case: collect the branch names of the given changesets, then
    # return everything on any of those branches (plus the inputs themselves)
    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
                         condrepr=lambda: '<branch %r>' % sorted(b))
639 639
@predicate('bumped()', safe=True)
def bumped(repo, subset, x):
    """Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    # the obsolescence module maintains the 'bumped' revision cache
    bumped = obsmod.getrevs(repo, 'bumped')
    return subset & bumped
650 650
@predicate('bundle()', safe=True)
def bundle(repo, subset, x):
    """Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        # bundlerevs only exists when the repo is overlaid with a bundle
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        raise error.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs
662 662
def checkstatus(repo, subset, pat, field):
    """Shared implementation of adds() and modifies().

    ``field`` indexes the repo.status() result tuple: this module passes
    0 from modifies() and 1 from adds().
    """
    hasset = matchmod.patkind(pat) == 'set'

    # one-element list used as a mutable cell to cache the matcher
    mcache = [None]
    def matches(x):
        c = repo[x]
        # fileset ('set:') patterns depend on the context, so they must be
        # rebuilt for every revision; plain patterns are built once
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            # single literal file: cheap membership tests replace matching
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        # only now pay for the (expensive) status computation
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
694 694
def _children(repo, narrow, parentset):
    """Revisions in ``narrow`` that have at least one parent in
    ``parentset``."""
    if not parentset:
        return baseset()
    cs = set()
    pr = repo.changelog.parentrevs
    minrev = parentset.min()
    for r in narrow:
        # a child always has a higher revision number than its parents,
        # so anything at or below the smallest parent can be skipped
        if r <= minrev:
            continue
        for p in pr(r):
            if p in parentset:
                cs.add(r)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    return baseset(cs)
710 710
@predicate('children(set)', safe=True)
def children(repo, subset, x):
    """Child changesets of changesets in set.
    """
    # evaluate the argument against the whole repo (parents may lie outside
    # subset), then narrow the children back down to subset
    s = getset(repo, fullreposet(repo), x)
    cs = _children(repo, subset, s)
    return subset & cs
718 718
@predicate('closed()', safe=True)
def closed(repo, subset, x):
    """Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    # keep only changesets that close their branch
    return subset.filter(lambda r: repo[r].closesbranch(),
                         condrepr='<branch closed>')
727 727
@predicate('contains(pattern)')
def contains(repo, subset, x):
    """The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))
    plainpath = not matchmod.patkind(pat)

    def matches(rev):
        ctx = repo[rev]
        if plainpath:
            # kindless pattern: one canonical-path manifest membership test
            return pathutil.canonpath(repo.root, repo.getcwd(), pat) in ctx
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
        return any(m(f) for f in ctx.manifest())

    return subset.filter(matches, condrepr=('<contains %r>', pat))
754 754
@predicate('converted([id])', safe=True)
def converted(repo, subset, x):
    """Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is no way to resolve the old-repository revision here, so do a
    # simple string-prefix compare and hope for the best.

    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    rev = None
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        source = repo[r].extra().get('convert_revision', None)
        if source is None:
            return False
        return rev is None or source.startswith(rev)

    return subset.filter(_matchvalue,
                         condrepr=('<converted %r>', rev))
777 777
@predicate('date(interval)', safe=True)
def date(repo, subset, x):
    """Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    # util.matchdate builds a predicate applied to the changeset's
    # timestamp (date()[0])
    dm = util.matchdate(ds)
    return subset.filter(lambda x: dm(repo[x].date()[0]),
                         condrepr=('<date %r>', ds))
787 787
@predicate('desc(string)', safe=True)
def desc(repo, subset, x):
    """Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    # lower-case once up front; each description is lowered per revision
    ds = encoding.lower(getstring(x, _("desc requires a string")))

    return subset.filter(
        lambda r: ds in encoding.lower(repo[r].description()),
        condrepr=('<desc %r>', ds))
800 800
def _descendants(repo, subset, x, followfirst=False):
    # Shared implementation of descendants() and _firstdescendants().
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        # unordered subset: fall back to plain intersection
        result = subset & result
    return result
819 819
@predicate('descendants(set)', safe=True)
def descendants(repo, subset, x):
    """Changesets which are descendants of changesets in set.
    """
    # thin wrapper around _descendants (shared with _firstdescendants)
    return _descendants(repo, subset, x)
825 825
@predicate('_firstdescendants', safe=True)
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    # (internal predicate: no docstring so it is not advertised)
    return _descendants(repo, subset, x, followfirst=True)
831 831
@predicate('destination([set])', safe=True)
def destination(repo, subset, x):
    """Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source. Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be. Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        # _getrevsource resolves the operation's recorded source revision
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set. Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset. Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__,
                         condrepr=lambda: '<destination %r>' % sorted(dests))
876 876
@predicate('divergent()', safe=True)
def divergent(repo, subset, x):
    """
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    # the obsolescence module maintains the 'divergent' revision cache
    divergent = obsmod.getrevs(repo, 'divergent')
    return subset & divergent
886 886
@predicate('extinct()', safe=True)
def extinct(repo, subset, x):
    """Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    # the obsolescence module maintains the 'extinct' revision cache
    extincts = obsmod.getrevs(repo, 'extinct')
    return subset & extincts
895 895
@predicate('extra(label, [value])', safe=True)
def extra(repo, subset, x):
    """Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        kind, value, matcher = util.stringmatcher(value)

    def _matchvalue(r):
        extra = repo[r].extra()
        # with no value given, the mere presence of the label matches
        return label in extra and (value is None or matcher(extra[label]))

    return subset.filter(lambda r: _matchvalue(r),
                         condrepr=('<extra[%r] %r>', label, value))
926 926
@predicate('filelog(pattern)', safe=True)
def filelog(repo, subset, x):
    """Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        # plain path: exactly one file to examine
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        fl = repo.file(f)
        known = {}  # file node -> changelog rev found during earlier scans
        scanpos = 0
        for fr in list(fl):
            fn = fl.node(fr)
            if fn in known:
                s.add(known[fn])
                continue

            lr = fl.linkrev(fr)
            if lr in cl:
                s.add(lr)
            elif scanpos is not None:
                # lowest matching changeset is filtered, scan further
                # ahead in changelog
                start = max(lr, scanpos) + 1
                scanpos = None
                for r in cl.revs(start):
                    # minimize parsing of non-matching entries
                    if f in cl.revision(r) and f in cl.readfiles(r):
                        try:
                            # try to use manifest delta fastpath
                            n = repo[r].filenode(f)
                            if n not in known:
                                if n == fn:
                                    s.add(r)
                                    scanpos = r
                                    break
                                else:
                                    known[n] = r
                        except error.ManifestLookupError:
                            # deletion in changelog
                            continue

    return subset & s
992 992
@predicate('first(set, [n])', safe=True)
def first(repo, subset, x):
    """An alias for limit().
    """
    # identical argument handling and semantics as limit()
    return limit(repo, subset, x)
998 998
def _follow(repo, subset, x, name, followfirst=False):
    # Shared implementation of follow() and _followfirst(); ``name`` is only
    # used in error messages.
    l = getargs(x, 0, 1, _("%s takes no arguments or a pattern") % name)
    c = repo['.']
    if l:
        # pattern given: follow the history of every matching file
        x = getstring(l[0], _("%s expected a pattern") % name)
        matcher = matchmod.match(repo.root, repo.getcwd(), [x],
                                 ctx=repo[None], default='path')

        files = c.manifest().walk(matcher)

        s = set()
        for fname in files:
            fctx = c[fname]
            s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
            # include the revision responsible for the most recent version
            s.add(fctx.introrev())
    else:
        # no pattern: ancestors of the working directory's first parent
        s = _revancestors(repo, baseset([c.rev()]), followfirst)

    return subset & s
1019 1019
@predicate('follow([pattern])', safe=True)
def follow(repo, subset, x):
    """
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If pattern is specified, the histories of files matching given
    pattern is followed, including copies.
    """
    # shared implementation with _followfirst()
    return _follow(repo, subset, x, 'follow')
1028 1028
@predicate('_followfirst', safe=True)
def _followfirst(repo, subset, x):
    # ``followfirst([pattern])``
    # Like ``follow([pattern])`` but follows only the first parent of
    # every revisions or files revisions.
    # (internal predicate: no docstring so it is not advertised)
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
1035 1035
@predicate('all()', safe=True)
def getall(repo, subset, x):
    """All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    # getargs only validates that no argument was given
    getargs(x, 0, 0, _("all takes no arguments"))
    return subset & spanset(repo) # drop "null" if any
1043 1043
@predicate('grep(regex)')
def grep(repo, subset, x):
    """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        gr = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(x):
        c = repo[x]
        # search file names, user and description; stop at first hit
        return any(gr.search(t)
                   for t in c.files() + [c.user(), c.description()])

    return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
1064 1064
@predicate('_matchfiles', safe=True)
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        s = getstring(arg, "_matchfiles requires string arguments")
        # every argument is a two-character prefix followed by its value
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'revision')
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'default mode')
            default = value
        else:
            raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    # This directly read the changelog data as creating changectx for all
    # revisions is quite expensive.
    getfiles = repo.changelog.readfiles
    wdirrev = node.wdirrev
    def matches(x):
        if x == wdirrev:
            # working directory has no changelog entry; ask the context
            files = repo[x].files()
        else:
            files = getfiles(x)
        for f in files:
            if m(f):
                return True
        return False

    return subset.filter(matches,
                         condrepr=('<matchfiles patterns=%r, include=%r '
                                   'exclude=%r, default=%r, rev=%r>',
                                   pats, inc, exc, default, rev))
1128 1128
@predicate('file(pattern)', safe=True)
def hasfile(repo, subset, x):
    """Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    # delegate to _matchfiles with a single 'p:' (regular pattern) argument
    return _matchfiles(repo, subset, ('string', 'p:' + pat))
1141 1141
@predicate('head()', safe=True)
def head(repo, subset, x):
    """Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    hs = set()
    cl = repo.changelog
    # branchmap maps a branch name to its head nodes; convert nodes to revs
    for b, ls in repo.branchmap().iteritems():
        hs.update(cl.rev(h) for h in ls)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return baseset(hs) & subset
1157 1157
@predicate('heads(set)', safe=True)
def heads(repo, subset, x):
    """Members of set with no children in set.
    """
    s = getset(repo, subset, x)
    ps = parents(repo, subset, x)
    # a head is any member that is not some other member's parent
    return s - ps
1165 1165
@predicate('hidden()', safe=True)
def hidden(repo, subset, x):
    """Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    # revisions filtered out of the 'visible' view are the hidden ones
    hiddenrevs = repoview.filterrevs(repo, 'visible')
    return subset & hiddenrevs
1174 1174
@predicate('keyword(string)', safe=True)
def keyword(repo, subset, x):
    """Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        ctx = repo[r]
        # candidate strings: changed file names, user, and description
        for text in ctx.files() + [ctx.user(), ctx.description()]:
            if kw in encoding.lower(text):
                return True
        return False

    return subset.filter(matches, condrepr=('<keyword %r>', kw))
1189 1189
@predicate('limit(set[, n[, offset]])', safe=True)
def limit(repo, subset, x):
    """First n members of set, defaulting to 1, starting from offset.
    """
    args = getargsdict(x, 'limit', 'set n offset')
    if 'set' not in args:
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit requires one to three arguments"))
    try:
        lim, ofs = 1, 0
        if 'n' in args:
            # i18n: "limit" is a keyword
            lim = int(getstring(args['n'], _("limit requires a number")))
        if 'offset' in args:
            # i18n: "limit" is a keyword
            ofs = int(getstring(args['offset'], _("limit requires a number")))
        if ofs < 0:
            raise error.ParseError(_("negative offset"))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    os = getset(repo, fullreposet(repo), args['set'])
    result = []
    it = iter(os)
    # fix: don't reuse 'x' as the loop variable — it shadows the parse-tree
    # parameter and confuses readers
    for i in xrange(ofs):
        y = next(it, None)
        if y is None:
            break
    for i in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
                                     lim, ofs, subset, os))
1226 1226
@predicate('last(set, [n])', safe=True)
def last(repo, subset, x):
    """Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    lim = 1
    if len(l) == 2:
        try:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
        except (TypeError, ValueError):
            # i18n: "last" is a keyword
            raise error.ParseError(_("last expects a number"))
    os = getset(repo, fullreposet(repo), l[0])
    # walk the set back-to-front and keep the first lim members in subset
    os.reverse()
    result = []
    it = iter(os)
    for _i in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))
1252 1252
@predicate('max(set)', safe=True)
def maxrev(repo, subset, x):
    """Changeset with highest revision number in set.
    """
    # note: 'os' here is a smartset, not the os module
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.max()
        if m in subset:
            return baseset([m], datarepr=('<max %r, %r>', subset, os))
    except ValueError:
        # os.max() throws a ValueError when the collection is empty.
        # Same as python's max().
        pass
    return baseset(datarepr=('<max %r, %r>', subset, os))
1267 1267
@predicate('merge()', safe=True)
def merge(repo, subset, x):
    """Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    cl = repo.changelog
    # a merge has a real (non -1) second parent
    return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
                         condrepr='<merge>')
1277 1277
@predicate('branchpoint()', safe=True)
def branchpoint(repo, subset, x):
    """Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    # count, for every rev at or above baserev, how many children it has
    parentscount = [0]*(len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                parentscount[p - baserev] += 1
    return subset.filter(lambda r: parentscount[r - baserev] > 1,
                         condrepr='<branchpoint>')
1297 1297
@predicate('min(set)', safe=True)
def minrev(repo, subset, x):
    """Changeset with lowest revision number in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    try:
        lowest = candidates.min()
    except ValueError:
        # An empty smartset raises ValueError from min(), mirroring the
        # behavior of Python's builtin min().
        lowest = None
    if lowest is not None and lowest in subset:
        return baseset([lowest],
                       datarepr=('<min %r, %r>', subset, candidates))
    return baseset(datarepr=('<min %r, %r>', subset, candidates))
1312 1312
@predicate('modifies(pattern)', safe=True)
def modifies(repo, subset, x):
    """Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pattern = getstring(x, _("modifies requires a pattern"))
    # NOTE(review): index 0 presumably selects the "modified" status field
    # in checkstatus -- confirm against checkstatus' definition.
    return checkstatus(repo, subset, pattern, 0)
1324 1324
@predicate('named(namespace)')
def named(repo, subset, x):
    """The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    pat = getstring(args[0],
                    # i18n: "named" is a keyword
                    _('the argument to named must be a string'))
    kind, pattern, matcher = util.stringmatcher(pat)
    namespaces = set()
    if kind == 'literal':
        # A literal lookup must name an existing namespace exactly.
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % pat)
        namespaces.add(repo.names[pattern])
    else:
        namespaces.update(ns for name, ns in repo.names.iteritems()
                          if matcher(name))
    if not namespaces:
        raise error.RepoLookupError(_("no namespace exists"
                                      " that match '%s'") % pattern)

    # Collect the revisions bound to every non-deprecated name in the
    # selected namespaces.
    revs = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name in ns.deprecated:
                continue
            revs.update(repo[n].rev() for n in ns.nodes(repo, name))

    revs.discard(node.nullrev)
    return subset & revs
1362 1362
@predicate('id(string)', safe=True)
def node_(repo, subset, x):
    """Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    args = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    hexprefix = getstring(args[0], _("id requires a string"))
    rn = None
    if len(hexprefix) == 40:
        # A full 40-character hash resolves directly through the changelog.
        try:
            rn = repo.changelog.rev(node.bin(hexprefix))
        except (LookupError, TypeError):
            pass
    else:
        # A shorter prefix must match a single node unambiguously.
        pm = repo.changelog._partialmatch(hexprefix)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    return baseset([rn]) & subset
1386 1386
@predicate('obsolete()', safe=True)
def obsolete(repo, subset, x):
    """Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    return subset & obsmod.getrevs(repo, 'obsolete')
1394 1394
@predicate('only(set, [set])', safe=True)
def only(repo, subset, x):
    """Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        # Single-argument form: exclude every repo head that is neither
        # in the included set nor a descendant of it.
        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if rev not in descendants and rev not in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & results
1420 1420
@predicate('origin([set])', safe=True)
def origin(repo, subset, x):
    """
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, fullreposet(repo), x)
    else:
        dests = fullreposet(repo)

    def _firstsrc(rev):
        # Walk the source chain backwards and return the original source,
        # or None when 'rev' was not produced by graft/transplant/rebase.
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        while True:
            prev = _getrevsource(repo, src)

            if prev is None:
                return src
            src = prev

    # Feed the set from a generator directly instead of materializing an
    # intermediate list, and drop the None marker with discard() rather
    # than building a throwaway singleton set.
    o = set(_firstsrc(r) for r in dests)
    o.discard(None)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & o
1452 1452
@predicate('outgoing([path])', safe=True)
def outgoing(repo, subset, x):
    """Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    # Use a real conditional expression instead of the fragile
    # 'cond and a or b' idiom.
    dest = getstring(l[0], _("outgoing requires a repository path")) if l else ''
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # Silence the discovery chatter while talking to the remote.
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = set(cl.rev(r) for r in outgoing.missing)
    return subset & o
1479 1479
@predicate('p1([set])', safe=True)
def p1(repo, subset, x):
    """First parent of changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context.
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    cl = repo.changelog
    ps = set(cl.parentrevs(r)[0]
             for r in getset(repo, fullreposet(repo), x))
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
1498 1498
@predicate('p2([set])', safe=True)
def p2(repo, subset, x):
    """Second parent of changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context.
        ps = repo[x].parents()
        try:
            p = ps[1].rev()
        except IndexError:
            # The working directory has no second parent.
            return baseset()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    cl = repo.changelog
    ps = set(cl.parentrevs(r)[1]
             for r in getset(repo, fullreposet(repo), x))
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
1521 1521
@predicate('parents([set])', safe=True)
def parents(repo, subset, x):
    """
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        ps = set(p.rev() for p in repo[x].parents())
    else:
        ps = set()
        cl = repo.changelog
        for r in getset(repo, fullreposet(repo), x):
            if r == node.wdirrev:
                # The working directory's parents come from its context
                # objects, not from the changelog.
                ps.update(p.rev() for p in repo[r].parents())
            else:
                ps.update(cl.parentrevs(r))
    ps.discard(node.nullrev)
    return subset & ps
1541 1541
def _phase(repo, subset, target):
    """helper to select all rev in phase <target>"""
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        # Fast path: the phase cache keeps a precomputed revision set per
        # phase; subtract filtered revisions and sort for ascending order.
        revs = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
        revs = baseset(revs)
        revs.sort() # set are non ordered, so we enforce ascending
        return subset & revs
    else:
        # Slow path: test the phase of each revision individually.
        phase = repo._phasecache.phase
        return subset.filter(lambda r: phase(repo, r) == target,
                             condrepr=('<phase %r>', target),
                             cache=False)
1555 1555
@predicate('draft()', safe=True)
def draft(repo, subset, x):
    """Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    return _phase(repo, subset, phases.draft)
1563 1563
@predicate('secret()', safe=True)
def secret(repo, subset, x):
    """Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    return _phase(repo, subset, phases.secret)
1571 1571
def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        # n arrives as a parse-tree token; n[1] holds its text. Anything
        # that is not exactly 0, 1 or 2 is rejected as a parse error.
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            # ^0 is the revision itself.
            ps.add(r)
        elif n == 1:
            ps.add(cl.parentrevs(r)[0])
        elif n == 2:
            parents = cl.parentrevs(r)
            # Only merges contribute a second parent.
            if len(parents) > 1:
                ps.add(parents[1])
    return subset & ps
1596 1596
@predicate('present(set)', safe=True)
def present(repo, subset, x):
    """An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        # Swallow the lookup failure on purpose: that is the whole point
        # of present() -- unknown revisions simply yield an empty set.
        return baseset()
1610 1610
# for internal use
@predicate('_notpublic', safe=True)
def _notpublic(repo, subset, x):
    getargs(x, 0, 0, "_notpublic takes no arguments")
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        # Fast path: union every non-public phase set (all sets past
        # index 0), then drop filtered revisions and sort ascending.
        revs = set()
        for phaseset in repo._phasecache._phasesets[1:]:
            revs.update(phaseset)
        revs = baseset(revs - repo.changelog.filteredrevs)
        revs.sort()
        return subset & revs
    else:
        # Slow path: test each revision's phase individually.
        phase = repo._phasecache.phase
        target = phases.public
        return subset.filter(lambda r: phase(repo, r) != target,
                             condrepr=('<phase %r>', target),
                             cache=False)
1629 1629
@predicate('public()', safe=True)
def public(repo, subset, x):
    """Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    # NOTE(review): unlike draft()/secret(), this does not go through
    # _phase() -- presumably no precomputed set exists for the public
    # phase; confirm against the phase cache implementation.
    phase = repo._phasecache.phase
    target = phases.public
    def ispublic(r):
        return phase(repo, r) == target
    return subset.filter(ispublic, condrepr=('<phase %r>', target),
                         cache=False)
1640 1640
@predicate('remote([id [,path]])', safe=True)
def remote(repo, subset, x):
    """Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))

    # First optional argument: the identifier to look up remotely.
    # '.' (also the default) is translated to the current local branch name.
    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        q = repo['.'].branch()

    # Second optional argument: the remote repository path ('default'
    # when omitted).
    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # Ask the remote to resolve the identifier, then map the resulting
    # node back to a local revision if we have it.
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
1675 1675
@predicate('removes(pattern)', safe=True)
def removes(repo, subset, x):
    """Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pattern = getstring(x, _("removes requires a pattern"))
    # NOTE(review): index 2 presumably selects the "removed" status field
    # in checkstatus -- confirm against checkstatus' definition.
    return checkstatus(repo, subset, pattern, 2)
1687 1687
@predicate('rev(number)', safe=True)
def rev(repo, subset, x):
    """Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    args = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        r = int(getstring(args[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    # nullrev is valid even though it is not a changelog entry.
    if r not in repo.changelog and r != node.nullrev:
        return baseset()
    return subset & baseset([r])
1703 1703
@predicate('matching(revision [, field])', safe=True)
def matching(repo, subset, x):
    """Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                # 'author' is documented as a synonym of 'user'
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
        'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True),)
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        # x matches when, for at least one revision in 'revs', every
        # selected field of x equals that revision's field.
        # Note: the inner loop keeps evaluating field functions after the
        # first mismatch instead of breaking early.
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
            if match:
                return True
        return False

    return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1815 1815
@predicate('reverse(set)', safe=True)
def reverse(repo, subset, x):
    """Reverse order of set.
    """
    revs = getset(repo, subset, x)
    revs.reverse()
    return revs
1823 1823
@predicate('roots(set)', safe=True)
def roots(repo, subset, x):
    """Changesets in set with no parent changeset in set.
    """
    s = getset(repo, fullreposet(repo), x)
    parents = repo.changelog.parentrevs
    def isroot(r):
        # A root has no real (non-null) parent inside the set.
        return not any(0 <= p and p in s for p in parents(r))
    return subset & s.filter(isroot, condrepr='<roots>')
1836 1836
# Map of sort key name -> function extracting the comparison key from a
# changectx; used by sort(). 'author' is an alias for 'user'.
_sortkeyfuncs = {
    'rev': lambda c: c.rev(),
    'branch': lambda c: c.branch(),
    'desc': lambda c: c.description(),
    'user': lambda c: c.user(),
    'author': lambda c: c.user(),
    'date': lambda c: c.date()[0],
    }
1845 1845
@predicate('sort(set[, [-]key... [, ...]])', safe=True)
def sort(repo, subset, x):
    """Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    - ``topo`` for a reverse topographical sort

    The ``topo`` sort order cannot be combined with other sort keys. This sort
    takes one optional argument, ``topo.firstbranch``, which takes a revset that
    specifies what topographical branches to prioritize in the sort.

    """
    args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
    if 'set' not in args:
        # i18n: "sort" is a keyword
        raise error.ParseError(_('sort requires one or two arguments'))
    keys = "rev"
    if 'keys' in args:
        # i18n: "sort" is a keyword
        keys = getstring(args['keys'], _("sort spec must be a string"))

    s = args['set']
    keys = keys.split()
    revs = getset(repo, subset, s)

    # Compute once whether 'topo' (possibly negated) appears among the
    # keys instead of rescanning the key list twice.
    hastopo = any(k.lstrip('-') == 'topo' for k in keys)
    if len(keys) > 1 and hastopo:
        # i18n: "topo" is a keyword
        raise error.ParseError(_(
            'topo sort order cannot be combined with other sort keys'))

    firstbranch = ()
    if 'topo.firstbranch' in args:
        if hastopo:
            firstbranch = getset(repo, subset, args['topo.firstbranch'])
        else:
            # i18n: "topo" and "topo.firstbranch" are keywords
            raise error.ParseError(_(
                'topo.firstbranch can only be used when using the topo sort '
                'key'))

    # Fast paths for the common single-key cases.
    if keys == ["rev"]:
        revs.sort()
        return revs
    elif keys == ["-rev"]:
        revs.sort(reverse=True)
        return revs
    elif keys[0] in ("topo", "-topo"):
        revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
                       istopo=True)
        if keys[0][0] == '-':
            revs.reverse()
        return revs

    # General case: repeatedly sort on each key from least to most
    # significant. sort() is guaranteed to be stable, so earlier keys
    # survive as tie-breakers.
    ctxs = [repo[r] for r in revs]
    for k in reversed(keys):
        fk = k
        reverse = (k[0] == '-')
        if reverse:
            k = k[1:]
        try:
            ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
        except KeyError:
            raise error.ParseError(_("unknown sort key %r") % fk)
    return baseset([c.rev() for c in ctxs])
1889 1918
def _toposort(revs, parentsfunc, firstbranch=()):
    """Yield revisions from heads to roots one (topo) branch at a time.

    'revs' is a smartset of revisions (must support sort(reverse=True)),
    'parentsfunc' maps a revision to its parent revisions, and
    'firstbranch' optionally pre-seeds the revisions whose branch should
    be emitted first.

    This function aims to be used by a graph generator that wishes to minimize
    the number of parallel branches and their interleaving.

    Example iteration order (numbers show the "true" order in a changelog):

      o  4
      |
      o  1
      |
      | o  3
      | |
      | o  2
      |/
      o  0

    Note that the ancestors of merges are understood by the current
    algorithm to be on the same branch. This means no reordering will
    occur behind a merge.
    """

    ### Quick summary of the algorithm
    #
    # This function is based around a "retention" principle. We keep revisions
    # in memory until we are ready to emit a whole branch that immediately
    # "merges" into an existing one. This reduces the number of parallel
    # branches with interleaved revisions.
    #
    # During iteration revs are split into two groups:
    # A) revision already emitted
    # B) revision in "retention". They are stored as different subgroups.
    #
    # for each REV, we do the following logic:
    #
    #   1) if REV is a parent of (A), we will emit it. If there is a
    #   retention group ((B) above) that is blocked on REV being
    #   available, we emit all the revisions out of that retention
    #   group first.
    #
    #   2) else, we'll search for a subgroup in (B) awaiting for REV to be
    #   available, if such subgroup exist, we add REV to it and the subgroup is
    #   now awaiting for REV.parents() to be available.
    #
    #   3) finally if no such group existed in (B), we create a new subgroup.
    #
    #
    # To bootstrap the algorithm, we emit the tipmost revision (which
    # puts it in group (A) from above).

    revs.sort(reverse=True)

    # Set of parents of revision that have been emitted. They can be considered
    # unblocked as the graph generator is already aware of them so there is no
    # need to delay the revisions that reference them.
    #
    # If someone wants to prioritize a branch over the others, pre-filling this
    # set will force all other branches to wait until this branch is ready to be
    # emitted.
    unblocked = set(firstbranch)

    # list of groups waiting to be displayed, each group is defined by:
    #
    #   (revs:    lists of revs waiting to be displayed,
    #    blocked: set of that cannot be displayed before those in 'revs')
    #
    # The second value ('blocked') correspond to parents of any revision in the
    # group ('revs') that is not itself contained in the group. The main idea
    # of this algorithm is to delay as much as possible the emission of any
    # revision.  This means waiting for the moment we are about to display
    # these parents to display the revs in a group.
    #
    # This first implementation is smart until it encounters a merge: it will
    # emit revs as soon as any parent is about to be emitted and can grow an
    # arbitrary number of revs in 'blocked'. In practice this mean we properly
    # retains new branches but gives up on any special ordering for ancestors
    # of merges. The implementation can be improved to handle this better.
    #
    # The first subgroup is special. It corresponds to all the revision that
    # were already emitted. The 'revs' lists is expected to be empty and the
    # 'blocked' set contains the parents revisions of already emitted revision.
    #
    # You could pre-seed the <parents> set of groups[0] to a specific
    # changesets to select what the first emitted branch should be.
    groups = [([], unblocked)]
    # Min-heap (negated values, so effectively max-first) and mirror set of
    # revisions awaiting processing.
    pendingheap = []
    pendingset = set()

    heapq.heapify(pendingheap)
    heappop = heapq.heappop
    heappush = heapq.heappush
    for currentrev in revs:
        # Heap works with smallest element, we want highest so we invert
        if currentrev not in pendingset:
            heappush(pendingheap, -currentrev)
            pendingset.add(currentrev)
        # iterates on pending rev until after the current rev have been
        # processed.
        rev = None
        while rev != currentrev:
            rev = -heappop(pendingheap)
            pendingset.remove(rev)

            # Seek for a subgroup blocked, waiting for the current revision.
            matching = [i for i, g in enumerate(groups) if rev in g[1]]

            if matching:
                # The main idea is to gather together all sets that are blocked
                # on the same revision.
                #
                # Groups are merged when a common blocking ancestor is
                # observed. For example, given two groups:
                #
                # revs [5, 4] waiting for 1
                # revs [3, 2] waiting for 1
                #
                # These two groups will be merged when we process
                # 1. In theory, we could have merged the groups when
                # we added 2 to the group it is now in (we could have
                # noticed the groups were both blocked on 1 then), but
                # the way it works now makes the algorithm simpler.
                #
                # We also always keep the oldest subgroup first. We can
                # probably improve the behavior by having the longest set
                # first. That way, graph algorithms could minimise the length
                # of parallel lines their drawing. This is currently not done.
                targetidx = matching.pop(0)
                trevs, tparents = groups[targetidx]
                for i in matching:
                    gr = groups[i]
                    trevs.extend(gr[0])
                    tparents |= gr[1]
                # delete all merged subgroups (except the one we kept)
                # (starting from the last subgroup for performance and
                # sanity reasons)
                for i in reversed(matching):
                    del groups[i]
            else:
                # This is a new head. We create a new subgroup for it.
                targetidx = len(groups)
                groups.append(([], set([rev])))

            gr = groups[targetidx]

            # We now add the current nodes to this subgroups. This is done
            # after the subgroup merging because all elements from a subgroup
            # that relied on this rev must precede it.
            #
            # we also update the <parents> set to include the parents of the
            # new nodes.
            if rev == currentrev: # only display stuff in rev
                gr[0].append(rev)
            gr[1].remove(rev)
            parents = [p for p in parentsfunc(rev) if p > node.nullrev]
            gr[1].update(parents)
            for p in parents:
                if p not in pendingset:
                    pendingset.add(p)
                    heappush(pendingheap, -p)

            # Look for a subgroup to display
            #
            # When unblocked is empty (if clause), we were not waiting for any
            # revisions during the first iteration (if no priority was given) or
            # if we emitted a whole disconnected set of the graph (reached a
            # root).  In that case we arbitrarily take the oldest known
            # subgroup. The heuristic could probably be better.
            #
            # Otherwise (elif clause) if the subgroup is blocked on
            # a revision we just emitted, we can safely emit it as
            # well.
            if not unblocked:
                if len(groups) > 1:  # display other subset
                    targetidx = 1
                    gr = groups[1]
            elif not gr[1] & unblocked:
                gr = None

            if gr is not None:
                # update the set of awaited revisions with the one from the
                # subgroup
                unblocked |= gr[1]
                # output all revisions in the subgroup
                for r in gr[0]:
                    yield r
                # delete the subgroup that you just output
                # unless it is groups[0] in which case you just empty it.
                if targetidx:
                    del groups[targetidx]
                else:
                    gr[0][:] = []
    # Check if we have some subgroup waiting for revisions we are not going to
    # iterate over
    for g in groups:
        for r in g[0]:
            yield r
2087 2116
@predicate('subrepo([pattern])')
def subrepo(repo, subset, x):
    """Changesets that add, modify or remove the given subrepo. If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    pat = None
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    # only .hgsubstate needs to be diffed to detect subrepo state changes
    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        # yield the subrepo names that match the user-supplied pattern
        k, p, m = util.stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        # per-revision predicate: True if revision x changed a matching subrepo
        c = repo[x]
        s = repo.status(c.p1().node(), c.node(), match=m)

        if pat is None:
            # no pattern given: any change to .hgsubstate qualifies
            return s.added or s.modified or s.removed

        if s.added:
            return any(submatches(c.substate.keys()))

        if s.modified:
            # consider subrepos present on either side of the change
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches, condrepr=('<subrepo %r>', pat))
2131 2160
def _substringmatcher(pattern):
    """Return (kind, pattern, matcher) from util.stringmatcher, with the
    'literal' matcher replaced by substring containment."""
    kind, pattern, matcher = util.stringmatcher(pattern)
    if kind != 'literal':
        return kind, pattern, matcher
    return kind, pattern, lambda haystack: pattern in haystack
2137 2166
@predicate('tag([name])', safe=True)
def tag(repo, subset, x):
    """The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if args:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = util.stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise error.RepoLookupError(_("tag '%s' does not exist")
                                            % pattern)
            s = set([repo[tn].rev()])
        else:
            # non-literal pattern: scan the whole tag list with the matcher
            s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
    else:
        # no argument: all tagged revisions, excluding the implicit 'tip' tag
        s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
    return subset & s
2166 2195
@predicate('tagged', safe=True)
def tagged(repo, subset, x):
    # compatibility alias for tag(); kept docstring-free so it does not
    # show up separately in the generated predicate help
    return tag(repo, subset, x)
2170 2199
@predicate('unstable()', safe=True)
def unstable(repo, subset, x):
    """Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    # the unstable set is precomputed by the obsolete module; intersect it
    unstables = obsmod.getrevs(repo, 'unstable')
    return subset & unstables
2179 2208
2180 2209
@predicate('user(string)', safe=True)
def user(repo, subset, x):
    """User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    # plain alias of the author() predicate
    return author(repo, subset, x)
2190 2219
# experimental
@predicate('wdir', safe=True)
def wdir(repo, subset, x):
    # intentionally docstring-free while experimental (a docstring would be
    # surfaced by the predicate help)
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    # only report the virtual working-directory revision when the subset can
    # actually contain it: explicitly, or because it spans the whole repo
    if node.wdirrev in subset or isinstance(subset, fullreposet):
        return baseset([node.wdirrev])
    return baseset()
2199 2228
# for internal use
@predicate('_list', safe=True)
def _list(repo, subset, x):
    # expand a '\0'-separated list of revision identifiers (as produced by
    # formatspec's %ls) into a baseset, preserving input order
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    ls = []
    seen = set()
    for t in s.split('\0'):
        try:
            # fast path for integer revision
            r = int(t)
            if str(r) != t or r not in cl:
                raise ValueError
            revs = [r]
        except ValueError:
            # not a plain revnum: resolve via the generic symbol handler
            revs = stringset(repo, subset, t)

        for r in revs:
            if r in seen:
                continue
            if (r in subset
                or r == node.nullrev and isinstance(subset, fullreposet)):
                ls.append(r)
                seen.add(r)
    return baseset(ls)
2229 2258
# for internal use
@predicate('_intlist', safe=True)
def _intlist(repo, subset, x):
    # select revisions from a '\0'-separated list of integer revnums,
    # keeping only those already present in `subset`
    raw = getstring(x, "internal error")
    if not raw:
        return baseset()
    wanted = [int(piece) for piece in raw.split('\0')]
    return baseset([r for r in wanted if r in subset])
2239 2268
# for internal use
@predicate('_hexlist', safe=True)
def _hexlist(repo, subset, x):
    # select revisions from a '\0'-separated list of hex nodeids,
    # keeping only those already present in `subset`
    raw = getstring(x, "internal error")
    if not raw:
        return baseset()
    torev = repo.changelog.rev
    revs = [torev(node.bin(h)) for h in raw.split('\0')]
    return baseset([r for r in revs if r in subset])
2250 2279
# dispatch table: parse tree node type -> evaluation function
# (handlers are defined earlier in this module)
methods = {
    "range": rangeset,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "or": orset,
    "not": notset,
    "difference": differenceset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": p1,
}
2267 2296
def _matchonly(revs, bases):
    """Detect 'ancestors(A) and not ancestors(B)' and return only()'s args

    Returns the ('list', A, B) argument tree consumed by the only() fast
    path when both operand trees match; otherwise returns None (implicitly).

    >>> f = lambda *args: _matchonly(*map(parse, args))
    >>> f('ancestors(A)', 'not ancestors(B)')
    ('list', ('symbol', 'A'), ('symbol', 'B'))
    """
    if (revs is not None
        and revs[0] == 'func'
        and getstring(revs[1], _('not a symbol')) == 'ancestors'
        and bases is not None
        and bases[0] == 'not'
        and bases[1][0] == 'func'
        and getstring(bases[1][1], _('not a symbol')) == 'ancestors'):
        return ('list', revs[2], bases[1][2])
2282 2311
def _optimize(x, small):
    """Optimize parsed tree ``x``, returning a (weight, newtree) pair.

    The weight is a rough relative cost estimate; the 'and' branch uses it
    to evaluate the cheaper operand first.  When ``small`` is true, trivial
    single-revision trees get an extra weight bonus (see smallbonus).
    """
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    if op == 'minus':
        return _optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'only':
        t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
        return _optimize(t, small)
    elif op == 'onlypost':
        return _optimize(('func', ('symbol', 'only'), x[1]), small)
    elif op == 'dagrangepre':
        return _optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        return _optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangeall':
        return _optimize(('range', ('string', '0'), ('string', 'tip')), small)
    elif op == 'rangepre':
        return _optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        return _optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        s = getstring(x[1], _("can't negate that"))
        return _optimize(('string', '-' + s), small)
    # NOTE: this (and the function-name tests below) is a *substring*
    # membership test on a space-separated string, not a set lookup; it
    # relies on op/f taking only known values
    elif op in 'string symbol negate':
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = _optimize(x[1], True)
        wb, tb = _optimize(x[2], True)
        w = min(wa, wb)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
        tm = _matchonly(ta, tb) or _matchonly(tb, ta)
        if tm:
            return w, ('func', ('symbol', 'only'), tm)

        if tb is not None and tb[0] == 'not':
            return wa, ('difference', ta, tb[1])

        if wa > wb:
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        # fast path for machine-generated expression, that is likely to have
        # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
        ws, ts, ss = [], [], []
        def flushss():
            # fold the pending run of trivial operands into a single _list()
            if not ss:
                return
            if len(ss) == 1:
                w, t = ss[0]
            else:
                s = '\0'.join(t[1] for w, t in ss)
                y = ('func', ('symbol', '_list'), ('string', s))
                w, t = _optimize(y, False)
            ws.append(w)
            ts.append(t)
            del ss[:]
        for y in x[1:]:
            w, t = _optimize(y, False)
            if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
                ss.append((w, t))
                continue
            flushss()
            ws.append(w)
            ts.append(t)
        flushss()
        if len(ts) == 1:
            return ws[0], ts[0] # 'or' operation is fully optimized out
        # we can't reorder trees by weight because it would change the order.
        # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
        # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
        return max(ws), (op,) + tuple(ts)
    elif op == 'not':
        # Optimize not public() to _notpublic() because we have a fast version
        if x[1] == ('func', ('symbol', 'public'), None):
            newsym = ('func', ('symbol', '_notpublic'), None)
            o = _optimize(newsym, not small)
            return o[0], o[1]
        else:
            o = _optimize(x[1], not small)
            return o[0], (op, o[1])
    elif op == 'parentpost':
        o = _optimize(x[1], small)
        return o[0], (op, o[1])
    elif op == 'group':
        return _optimize(x[1], small)
    elif op in 'dagrange range parent ancestorspec':
        if op == 'parent':
            # x^:y means (x^) : y, not x ^ (:y)
            post = ('parentpost', x[1])
            if x[2][0] == 'dagrangepre':
                return _optimize(('dagrange', post, x[2][1]), small)
            elif x[2][0] == 'rangepre':
                return _optimize(('range', post, x[2][1]), small)

        wa, ta = _optimize(x[1], small)
        wb, tb = _optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'list':
        ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
        return sum(ws), (op,) + ts
    elif op == 'func':
        f = getstring(x[1], _("not a symbol"))
        wa, ta = _optimize(x[2], small)
        if f in ("author branch closed date desc file grep keyword "
                 "outgoing user"):
            w = 10 # slow
        elif f in "modifies adds removes":
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in "reverse limit first _intlist":
            w = 0
        elif f in "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        return w + wa, (op, x[1], ta)
    return 1, x
2410 2439
def optimize(tree):
    """Optimize ``tree`` for evaluation and return the rewritten tree.

    Thin wrapper around _optimize() that discards the weight component of
    its (weight, tree) result.
    """
    return _optimize(tree, small=True)[1]
2414 2443
# the set of valid characters for the initial letter of symbols in
# alias declarations and definitions
# (ASCII alphanumerics, the characters '._@$', and any non-ASCII byte)
_aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
                           if c.isalnum() or c in '._@$' or ord(c) > 127)
2419 2448
def _parsewith(spec, lookup=None, syminitletters=None):
    """Generate a parse tree of given spec with given tokenizing options

    ``lookup`` and ``syminitletters`` are forwarded to tokenize().
    Raises ParseError when the spec cannot be fully consumed.

    >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
    ('func', ('symbol', 'foo'), ('symbol', '$1'))
    >>> _parsewith('$1')
    Traceback (most recent call last):
    ...
    ParseError: ("syntax error in revset '$1'", 0)
    >>> _parsewith('foo bar')
    Traceback (most recent call last):
    ...
    ParseError: ('invalid token', 4)
    """
    p = parser.parser(elements)
    tree, pos = p.parse(tokenize(spec, lookup=lookup,
                                 syminitletters=syminitletters))
    if pos != len(spec):
        # trailing input after a complete expression
        raise error.ParseError(_('invalid token'), pos)
    return parser.simplifyinfixops(tree, ('list', 'or'))
2440 2469
class _aliasrules(parser.basealiasrules):
    """Parsing and expansion rule set of revset aliases"""
    _section = _('revset alias')

    @staticmethod
    def _parse(spec):
        """Parse alias declaration/definition ``spec``

        This allows symbol names to use also ``$`` as an initial letter
        (for backward compatibility), and callers of this function should
        examine whether ``$`` is used also for unexpected symbols or not.
        """
        return _parsewith(spec, syminitletters=_aliassyminitletters)

    @staticmethod
    def _trygetfunc(tree):
        """Return (name, args) if ``tree`` is a function call; else None."""
        if tree[0] == 'func' and tree[1][0] == 'symbol':
            return tree[1][1], getlist(tree[2])
2459 2488
def expandaliases(ui, tree, showwarning=None):
    """Expand [revsetalias] config aliases in parsed tree ``tree``.

    If ``showwarning`` is given, it is called once for each alias that
    carries an error and has not been warned about yet.
    """
    aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
    tree = _aliasrules.expand(aliases, tree)
    if showwarning:
        # warn about problematic (but not referred) aliases
        for name, alias in sorted(aliases.iteritems()):
            if alias.error and not alias.warned:
                showwarning(_('warning: %s\n') % (alias.error))
                alias.warned = True
    return tree
2470 2499
def foldconcat(tree):
    """Fold elements to be concatenated by `##`"""
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return tree
    if tree[0] != '_concat':
        # not a concatenation node: fold each child independently
        return tuple(foldconcat(t) for t in tree)
    # flatten nested '_concat' nodes depth-first, left to right
    pieces = []
    stack = [tree]
    while stack:
        element = stack.pop()
        if element[0] == '_concat':
            stack.extend(reversed(element[1:]))
        elif element[0] in ('string', 'symbol'):
            pieces.append(element[1])
        else:
            msg = _("\"##\" can't concatenate \"%s\" element") % (element[0])
            raise error.ParseError(msg)
    return ('string', ''.join(pieces))
2491 2520
def parse(spec, lookup=None):
    """Parse revset ``spec`` into a tree; ``lookup`` is passed through to
    the tokenizer (see _parsewith)."""
    return _parsewith(spec, lookup=lookup)
2494 2523
def posttreebuilthook(tree, repo):
    """Hook point invoked with the final optimized tree; no-op by default."""
    # hook for extensions to execute code on the optimized tree
    pass
2498 2527
def match(ui, spec, repo=None):
    """Create a matcher callable for a single revision spec.

    Raises ParseError on an empty spec.
    """
    if not spec:
        raise error.ParseError(_("empty query"))
    lookup = repo.__contains__ if repo else None
    return _makematcher(ui, parse(spec, lookup), repo)
2507 2536
def matchany(ui, specs, repo=None):
    """Create a matcher that will include any revisions matching one of the
    given specs"""
    if not specs:
        # no specs at all: the matcher constantly returns the empty set
        def mfunc(repo, subset=None):
            return baseset()
        return mfunc
    if not all(specs):
        raise error.ParseError(_("empty query"))
    lookup = None
    if repo:
        # passed through to the tokenizer
        lookup = repo.__contains__
    if len(specs) == 1:
        tree = parse(specs[0], lookup)
    else:
        # union of all specs
        tree = ('or',) + tuple(parse(s, lookup) for s in specs)
    return _makematcher(ui, tree, repo)
2525 2554
def _makematcher(ui, tree, repo):
    """Turn a parsed tree into a matcher callable mfunc(repo, subset=None)."""
    if ui:
        tree = expandaliases(ui, tree, showwarning=ui.warn)
    tree = foldconcat(tree)
    tree = optimize(tree)
    posttreebuilthook(tree, repo)
    def mfunc(repo, subset=None):
        if subset is None:
            subset = fullreposet(repo)
        # plain collections are wrapped in a baseset to get the smartset API
        if util.safehasattr(subset, 'isascending'):
            result = getset(repo, subset, tree)
        else:
            result = getset(repo, baseset(subset), tree)
        return result
    return mfunc
2541 2570
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    "_list('') or 1"
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    '''

    def quote(s):
        # quote/escape via Python repr of the stringified value
        return repr(str(s))

    def argtype(c, arg):
        # render a single argument according to its format character
        if c == 'd':
            return str(int(arg))
        elif c == 's':
            return quote(arg)
        elif c == 'r':
            parse(arg) # make sure syntax errors are confined
            return '(%s)' % arg
        elif c == 'n':
            return quote(node.hex(arg))
        elif c == 'b':
            return quote(arg.branch())

    def listexp(s, t):
        # render a list of type ``t``, using the internal _list/_intlist/
        # _hexlist predicates where a fast path exists
        l = len(s)
        if l == 0:
            return "_list('')"
        elif l == 1:
            return argtype(t, s[0])
        elif t == 'd':
            return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
        elif t == 's':
            return "_list('%s')" % "\0".join(s)
        elif t == 'n':
            return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
        elif t == 'b':
            return "_list('%s')" % "\0".join(a.branch() for a in s)

        # no dedicated fast path (e.g. %lr): split recursively, join with 'or'
        m = l // 2
        return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))

    # scan expr, expanding %-escapes with successive elements of args
    ret = ''
    pos = 0
    arg = 0
    while pos < len(expr):
        c = expr[pos]
        if c == '%':
            pos += 1
            d = expr[pos]
            if d == '%':
                ret += d
            elif d in 'dsnbr':
                ret += argtype(d, args[arg])
                arg += 1
            elif d == 'l':
                # a list of some type
                pos += 1
                d = expr[pos]
                ret += listexp(list(args[arg]), d)
                arg += 1
            else:
                raise error.Abort('unexpected revspec format character %s' % d)
        else:
            ret += c
        pos += 1

    return ret
2635 2664
def prettyformat(tree):
    """Pretty-print a parsed revset tree via parser.prettyformat()."""
    return parser.prettyformat(tree, ('string', 'symbol'))
2638 2667
def depth(tree):
    """Return the nesting depth of a parsed revset tree.

    Non-tuple leaves have depth 0; a tuple node is one level deeper than
    its deepest child.  An empty tuple - never produced by the parser,
    but previously a ValueError crash in max() - counts as depth 1.
    """
    if not isinstance(tree, tuple):
        return 0
    if not tree:
        # guard: max() on an empty sequence would raise ValueError
        return 1
    return max(map(depth, tree)) + 1
2644 2673
def funcsused(tree):
    """Return the set of function names referenced anywhere in ``tree``."""
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return set()
    names = set()
    for subtree in tree[1:]:
        names.update(funcsused(subtree))
    if tree[0] == 'func':
        names.add(tree[1][1])
    return names
2655 2684
2656 2685 def _formatsetrepr(r):
2657 2686 """Format an optional printable representation of a set
2658 2687
2659 2688 ======== =================================
2660 2689 type(r) example
2661 2690 ======== =================================
2662 2691 tuple ('<not %r>', other)
2663 2692 str '<branch closed>'
2664 2693 callable lambda: '<branch %r>' % sorted(b)
2665 2694 object other
2666 2695 ======== =================================
2667 2696 """
2668 2697 if r is None:
2669 2698 return ''
2670 2699 elif isinstance(r, tuple):
2671 2700 return r[0] % r[1:]
2672 2701 elif isinstance(r, str):
2673 2702 return r
2674 2703 elif callable(r):
2675 2704 return r()
2676 2705 else:
2677 2706 return repr(r)
2678 2707
class abstractsmartset(object):
    """Base class defining the API shared by all smartset classes.

    A smartset represents an ordered set of revision numbers.  Subclasses
    must implement the methods below that raise NotImplementedError.
    """

    def __nonzero__(self):
        """True if the smartset is not empty"""
        raise NotImplementedError()

    def __contains__(self, rev):
        """provide fast membership testing"""
        raise NotImplementedError()

    def __iter__(self):
        """iterate the set in the order it is supposed to be iterated"""
        raise NotImplementedError()

    # Attributes containing a function to perform a fast iteration in a given
    # direction. A smartset can have none, one, or both defined.
    #
    # Default value is None instead of a function returning None to avoid
    # initializing an iterator just for testing if a fast method exists.
    fastasc = None
    fastdesc = None

    def isascending(self):
        """True if the set will iterate in ascending order"""
        raise NotImplementedError()

    def isdescending(self):
        """True if the set will iterate in descending order"""
        raise NotImplementedError()

    def istopo(self):
        """True if the set will iterate in topographical order"""
        raise NotImplementedError()

    @util.cachefunc
    def min(self):
        """return the minimum element in the set"""
        if self.fastasc is not None:
            # first element of a fast ascending iteration is the minimum
            for r in self.fastasc():
                return r
            raise ValueError('arg is an empty sequence')
        return min(self)

    @util.cachefunc
    def max(self):
        """return the maximum element in the set"""
        if self.fastdesc is not None:
            # first element of a fast descending iteration is the maximum
            for r in self.fastdesc():
                return r
            raise ValueError('arg is an empty sequence')
        return max(self)

    def first(self):
        """return the first element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def last(self):
        """return the last element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def __len__(self):
        """return the length of the smartsets

        This can be expensive on smartset that could be lazy otherwise."""
        raise NotImplementedError()

    def reverse(self):
        """reverse the expected iteration order"""
        raise NotImplementedError()

    def sort(self, reverse=True):
        """get the set to iterate in an ascending or descending order"""
        raise NotImplementedError()

    def __and__(self, other):
        """Returns a new object with the intersection of the two collections.

        This is part of the mandatory API for smartset."""
        if isinstance(other, fullreposet):
            # intersecting with everything is a no-op
            return self
        return self.filter(other.__contains__, condrepr=other, cache=False)

    def __add__(self, other):
        """Returns a new object with the union of the two collections.

        This is part of the mandatory API for smartset."""
        return addset(self, other)

    def __sub__(self, other):
        """Returns a new object with the substraction of the two collections.

        This is part of the mandatory API for smartset."""
        c = other.__contains__
        return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
                           cache=False)

    def filter(self, condition, condrepr=None, cache=True):
        """Returns this smartset filtered by condition as a new smartset.

        `condition` is a callable which takes a revision number and returns a
        boolean. Optional `condrepr` provides a printable representation of
        the given `condition`.

        This is part of the mandatory API for smartset."""
        # builtin cannot be cached. but do not needs to
        if cache and util.safehasattr(condition, 'func_code'):
            condition = util.cachefunc(condition)
        return filteredset(self, condition, condrepr)
2791 2820
class baseset(abstractsmartset):
    """Basic data structure that represents a revset and contains the basic
    operation that it should be able to perform.

    Every method in this class should be implemented by any smartset class.
    """
    def __init__(self, data=(), datarepr=None, istopo=False):
        """
        datarepr: a tuple of (format, obj, ...), a function or an object that
                  provides a printable representation of the given data.
        """
        # _ascending is None for insertion order, True/False for sorted order
        self._ascending = None
        self._istopo = istopo
        if not isinstance(data, list):
            if isinstance(data, set):
                # keep the set for O(1) membership tests
                self._set = data
                # set has no order we pick one for stability purpose
                self._ascending = True
            data = list(data)
        self._list = data
        self._datarepr = datarepr

    @util.propertycache
    def _set(self):
        # lazily built membership set
        return set(self._list)

    @util.propertycache
    def _asclist(self):
        # lazily built ascending copy of the data
        asclist = self._list[:]
        asclist.sort()
        return asclist

    def __iter__(self):
        if self._ascending is None:
            return iter(self._list)
        elif self._ascending:
            return iter(self._asclist)
        else:
            return reversed(self._asclist)

    def fastasc(self):
        return iter(self._asclist)

    def fastdesc(self):
        return reversed(self._asclist)

    @util.propertycache
    def __contains__(self):
        # bind the set's __contains__ directly to skip a method dispatch
        return self._set.__contains__

    def __nonzero__(self):
        return bool(self._list)

    def sort(self, reverse=False):
        self._ascending = not bool(reverse)
        self._istopo = False

    def reverse(self):
        if self._ascending is None:
            # unsorted data: reverse the stored insertion order in place
            self._list.reverse()
        else:
            self._ascending = not self._ascending
        self._istopo = False

    def __len__(self):
        return len(self._list)

    def isascending(self):
        """Returns True if the collection is ascending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and self._ascending

    def isdescending(self):
        """Returns True if the collection is descending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and not self._ascending

    def istopo(self):
        """Is the collection is in topographical order or not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._istopo

    def first(self):
        if self:
            if self._ascending is None:
                return self._list[0]
            elif self._ascending:
                return self._asclist[0]
            else:
                return self._asclist[-1]
        return None

    def last(self):
        if self:
            if self._ascending is None:
                return self._list[-1]
            elif self._ascending:
                return self._asclist[-1]
            else:
                return self._asclist[0]
        return None

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        s = _formatsetrepr(self._datarepr)
        if not s:
            l = self._list
            # if _list has been built from a set, it might have a different
            # order from one python implementation to another.
            # We fallback to the sorted version for a stable output.
            if self._ascending is not None:
                l = self._asclist
            s = repr(l)
        return '<%s%s %s>' % (type(self).__name__, d, s)
2915 2944
2916 2945 class filteredset(abstractsmartset):
2917 2946 """Duck type for baseset class which iterates lazily over the revisions in
2918 2947 the subset and contains a function which tests for membership in the
2919 2948 revset
2920 2949 """
    def __init__(self, subset, condition=lambda x: True, condrepr=None):
        """
        condition: a function that decide whether a revision in the subset
                   belongs to the revset or not.
        condrepr: a tuple of (format, obj, ...), a function or an object that
                  provides a printable representation of the given condition.
        """
        # nothing is copied: iteration and membership lazily consult _subset
        self._subset = subset
        self._condition = condition
        self._condrepr = condrepr
2931 2960
    def __contains__(self, x):
        """x is a member iff it is in the subset and passes the condition."""
        return x in self._subset and self._condition(x)

    def __iter__(self):
        return self._iterfilter(self._subset)

    def _iterfilter(self, it):
        # lazily yield only the revisions of ``it`` accepted by the condition
        cond = self._condition
        for x in it:
            if cond(x):
                yield x

    @property
    def fastasc(self):
        # expose a fast ascending iterator only if the subset has one
        it = self._subset.fastasc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    @property
    def fastdesc(self):
        # expose a fast descending iterator only if the subset has one
        it = self._subset.fastdesc
        if it is None:
            return None
        return lambda: self._iterfilter(it())
2957 2986
2958 2987 def __nonzero__(self):
2959 2988 fast = None
2960 2989 candidates = [self.fastasc if self.isascending() else None,
2961 2990 self.fastdesc if self.isdescending() else None,
2962 2991 self.fastasc,
2963 2992 self.fastdesc]
2964 2993 for candidate in candidates:
2965 2994 if candidate is not None:
2966 2995 fast = candidate
2967 2996 break
2968 2997
2969 2998 if fast is not None:
2970 2999 it = fast()
2971 3000 else:
2972 3001 it = self
2973 3002
2974 3003 for r in it:
2975 3004 return True
2976 3005 return False
2977 3006
2978 3007 def __len__(self):
2979 3008 # Basic implementation to be changed in future patches.
2980 3009 # until this gets improved, we use generator expression
2981 3010 # here, since list compr is free to call __len__ again
2982 3011 # causing infinite recursion
2983 3012 l = baseset(r for r in self)
2984 3013 return len(l)
2985 3014
2986 3015 def sort(self, reverse=False):
2987 3016 self._subset.sort(reverse=reverse)
2988 3017
2989 3018 def reverse(self):
2990 3019 self._subset.reverse()
2991 3020
2992 3021 def isascending(self):
2993 3022 return self._subset.isascending()
2994 3023
2995 3024 def isdescending(self):
2996 3025 return self._subset.isdescending()
2997 3026
2998 3027 def istopo(self):
2999 3028 return self._subset.istopo()
3000 3029
3001 3030 def first(self):
3002 3031 for x in self:
3003 3032 return x
3004 3033 return None
3005 3034
3006 3035 def last(self):
3007 3036 it = None
3008 3037 if self.isascending():
3009 3038 it = self.fastdesc
3010 3039 elif self.isdescending():
3011 3040 it = self.fastasc
3012 3041 if it is not None:
3013 3042 for x in it():
3014 3043 return x
3015 3044 return None #empty case
3016 3045 else:
3017 3046 x = None
3018 3047 for x in self:
3019 3048 pass
3020 3049 return x
3021 3050
3022 3051 def __repr__(self):
3023 3052 xs = [repr(self._subset)]
3024 3053 s = _formatsetrepr(self._condrepr)
3025 3054 if s:
3026 3055 xs.append(s)
3027 3056 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3028 3057
3029 3058 def _iterordered(ascending, iter1, iter2):
3030 3059 """produce an ordered iteration from two iterators with the same order
3031 3060
3032 3061 The ascending is used to indicated the iteration direction.
3033 3062 """
3034 3063 choice = max
3035 3064 if ascending:
3036 3065 choice = min
3037 3066
3038 3067 val1 = None
3039 3068 val2 = None
3040 3069 try:
3041 3070 # Consume both iterators in an ordered way until one is empty
3042 3071 while True:
3043 3072 if val1 is None:
3044 3073 val1 = next(iter1)
3045 3074 if val2 is None:
3046 3075 val2 = next(iter2)
3047 3076 n = choice(val1, val2)
3048 3077 yield n
3049 3078 if val1 == n:
3050 3079 val1 = None
3051 3080 if val2 == n:
3052 3081 val2 = None
3053 3082 except StopIteration:
3054 3083 # Flush any remaining values and consume the other one
3055 3084 it = iter2
3056 3085 if val1 is not None:
3057 3086 yield val1
3058 3087 it = iter1
3059 3088 elif val2 is not None:
3060 3089 # might have been equality and both are empty
3061 3090 yield val2
3062 3091 for val in it:
3063 3092 yield val
3064 3093
class addset(abstractsmartset):
    """Represent the addition of two sets

    Wrapper structure for lazily adding two structures without losing much
    performance on the __contains__ method

    If the ascending attribute is set, that means the two structures are
    ordered in either an ascending or descending way. Therefore, we can add
    them maintaining the order by iterating over both at the same time

    >>> xs = baseset([0, 3, 2])
    >>> ys = baseset([5, 2, 4])

    >>> rs = addset(xs, ys)
    >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
    (True, True, False, True, 0, 4)
    >>> rs = addset(xs, baseset([]))
    >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
    (True, True, False, 0, 2)
    >>> rs = addset(baseset([]), baseset([]))
    >>> bool(rs), 0 in rs, rs.first(), rs.last()
    (False, False, None, None)

    iterate unsorted:
    >>> rs = addset(xs, ys)
    >>> # (use generator because pypy could call len())
    >>> list(x for x in rs) # without _genlist
    [0, 3, 2, 5, 4]
    >>> assert not rs._genlist
    >>> len(rs)
    5
    >>> [x for x in rs] # with _genlist
    [0, 3, 2, 5, 4]
    >>> assert rs._genlist

    iterate ascending:
    >>> rs = addset(xs, ys, ascending=True)
    >>> # (use generator because pypy could call len())
    >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastasc()]
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert rs._asclist

    iterate descending:
    >>> rs = addset(xs, ys, ascending=False)
    >>> # (use generator because pypy could call len())
    >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastdesc()]
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert rs._asclist

    iterate ascending without fastasc:
    >>> rs = addset(xs, generatorset(ys), ascending=True)
    >>> assert rs.fastasc is None
    >>> [x for x in rs]
    [0, 2, 3, 4, 5]

    iterate descending without fastdesc:
    >>> rs = addset(generatorset(xs), ys, ascending=False)
    >>> assert rs.fastdesc is None
    >>> [x for x in rs]
    [5, 4, 3, 2, 0]
    """
    def __init__(self, revs1, revs2, ascending=None):
        # ascending: None = order unknown, True/False = both inputs share
        # that order and the merged iteration preserves it
        self._r1 = revs1
        self._r2 = revs2
        self._iter = None
        self._ascending = ascending
        self._genlist = None
        self._asclist = None

    def __len__(self):
        # forces materialization of the merged list (see _list)
        return len(self._list)

    def __nonzero__(self):
        return bool(self._r1) or bool(self._r2)

    @util.propertycache
    def _list(self):
        # cache the merged, deduplicated content as a baseset
        if not self._genlist:
            self._genlist = baseset(iter(self))
        return self._genlist

    def __iter__(self):
        """Iterate over both collections without repeating elements

        If the ascending attribute is not set, iterate over the first one and
        then over the second one checking for membership on the first one so we
        dont yield any duplicates.

        If the ascending attribute is set, iterate over both collections at the
        same time, yielding only one value at a time in the given order.
        """
        if self._ascending is None:
            if self._genlist:
                return iter(self._genlist)
            def arbitraryordergen():
                # r1 first, then anything in r2 that r1 did not contain
                for r in self._r1:
                    yield r
                inr1 = self._r1.__contains__
                for r in self._r2:
                    if not inr1(r):
                        yield r
            return arbitraryordergen()
        # try to use our own fast iterator if it exists
        self._trysetasclist()
        if self._ascending:
            attr = 'fastasc'
        else:
            attr = 'fastdesc'
        it = getattr(self, attr)
        if it is not None:
            return it()
        # maybe half of the component supports fast
        # get iterator for _r1
        iter1 = getattr(self._r1, attr)
        if iter1 is None:
            # let's avoid side effect (not sure it matters)
            iter1 = iter(sorted(self._r1, reverse=not self._ascending))
        else:
            iter1 = iter1()
        # get iterator for _r2
        iter2 = getattr(self._r2, attr)
        if iter2 is None:
            # let's avoid side effect (not sure it matters)
            iter2 = iter(sorted(self._r2, reverse=not self._ascending))
        else:
            iter2 = iter2()
        return _iterordered(self._ascending, iter1, iter2)

    def _trysetasclist(self):
        """populate the _asclist attribute if possible and necessary"""
        if self._genlist is not None and self._asclist is None:
            self._asclist = sorted(self._genlist)

    @property
    def fastasc(self):
        # prefer the cached sorted list; otherwise require fast ascending
        # iterators on BOTH components to merge in order
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__iter__
        iter1 = self._r1.fastasc
        iter2 = self._r2.fastasc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(True, iter1(), iter2())

    @property
    def fastdesc(self):
        # mirror of fastasc for descending iteration
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__reversed__
        iter1 = self._r1.fastdesc
        iter2 = self._r2.fastdesc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(False, iter1(), iter2())

    def __contains__(self, x):
        return x in self._r1 or x in self._r2

    def sort(self, reverse=False):
        """Sort the added set

        For this we use the cached list with all the generated values and if we
        know they are ascending or descending we can sort them in a smart way.
        """
        self._ascending = not reverse

    def isascending(self):
        return self._ascending is not None and self._ascending

    def isdescending(self):
        return self._ascending is not None and not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topological order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def reverse(self):
        if self._ascending is None:
            # unordered: reverse the materialized list in place
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        # reuse first() on the reversed set, restoring the order afterwards
        self.reverse()
        val = self.first()
        self.reverse()
        return val

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3273 3302
class generatorset(abstractsmartset):
    """Wrap a generator for lazy iteration

    Wrapper structure for generators that provides lazy membership and can
    be iterated more than once.
    When asked for membership it generates values until either it finds the
    requested one or has gone through all the elements in the generator
    """
    def __init__(self, gen, iterasc=None):
        """
        gen: a generator producing the values for the generatorset.
        iterasc: None when the generator order is unknown; True/False when
        it is known to yield in ascending/descending order, which enables
        the matching fast iterator and an early-exit __contains__.
        """
        self._gen = gen
        self._asclist = None
        self._cache = {}
        self._genlist = []
        self._finished = False
        self._ascending = True
        if iterasc is not None:
            if iterasc:
                self.fastasc = self._iterator
                self.__contains__ = self._asccontains
            else:
                self.fastdesc = self._iterator
                self.__contains__ = self._desccontains

    def __nonzero__(self):
        # Do not use 'for r in self' because it will enforce the iteration
        # order (default ascending), possibly unrolling a whole descending
        # iterator.
        if self._genlist:
            return True
        for r in self._consumegen():
            return True
        return False

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def _asccontains(self, x):
        """version of contains optimised for ascending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l > x:
                # ascending order: x can no longer appear
                break

        self._cache[x] = False
        return False

    def _desccontains(self, x):
        """version of contains optimised for descending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l < x:
                # descending order: x can no longer appear
                break

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator
        for x in self._consumegen():
            pass
        # recall the same code
        return iter(self)

    def _iterator(self):
        if self._finished:
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revision
        # removed from _consumegen and added to genlist in another instance.
        #
        # Getting rid of it would provide an about 15% speed up on this
        # iteration.
        genlist = self._genlist
        nextrev = self._consumegen().next
        _len = len # cache global lookup
        def gen():
            i = 0
            while True:
                if i < _len(genlist):
                    yield genlist[i]
                else:
                    yield nextrev()
                i += 1
        return gen()

    def _consumegen(self):
        # pull values from the wrapped generator, caching membership and
        # appending to the shared _genlist as we go
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        if not self._finished:
            # fully consumed: freeze a sorted copy and switch the fast
            # iterators over to it
            self._finished = True
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__

    def __len__(self):
        for x in self._consumegen():
            pass
        return len(self._genlist)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topological order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        return next(it(), None)

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again
            # (bug fix: this used to call self.first(), returning the wrong
            # end of the set when no fast reverse iterator existed yet)
            for x in self._consumegen():
                pass
            return self.last()
        return next(it(), None)

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s>' % (type(self).__name__, d)
3453 3482
class spanset(abstractsmartset):
    """Duck type for baseset class which represents a range of revisions and
    can work lazily and without having all the range in memory

    Note that spanset(x, y) behave almost like xrange(x, y) except for two
    notable points:
    - when x > y it will be automatically descending,
    - revision filtered with this repoview will be skipped.

    """
    def __init__(self, repo, start=0, end=None):
        """
        start: first revision included the set
        (default to 0)
        end: first revision excluded (last+1)
        (default to len(repo)

        Spanset will be descending if `end` < `start`.
        """
        if end is None:
            end = len(repo)
        self._ascending = start <= end
        if not self._ascending:
            # normalize so that _start <= _end; direction is kept in
            # _ascending instead
            start, end = end + 1, start +1
        self._start = start
        self._end = end
        self._hiddenrevs = repo.changelog.filteredrevs

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topological order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def _iterfilter(self, iterrange):
        # skip revisions hidden by the current repoview
        s = self._hiddenrevs
        for r in iterrange:
            if r not in s:
                yield r

    def __iter__(self):
        if self._ascending:
            return self.fastasc()
        else:
            return self.fastdesc()

    def fastasc(self):
        iterrange = xrange(self._start, self._end)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def fastdesc(self):
        iterrange = xrange(self._end - 1, self._start - 1, -1)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def __contains__(self, rev):
        hidden = self._hiddenrevs
        return ((self._start <= rev < self._end)
                and not (hidden and rev in hidden))

    def __nonzero__(self):
        for r in self:
            return True
        return False

    def __len__(self):
        if not self._hiddenrevs:
            return abs(self._end - self._start)
        else:
            # count hidden revisions falling inside the span
            # NOTE(review): __init__ normalizes so that start <= end, which
            # makes the 'end < rev <= start' clause look unreachable —
            # confirm before relying on it
            count = 0
            start = self._start
            end = self._end
            for rev in self._hiddenrevs:
                if (end < rev <= start) or (start <= rev < end):
                    count += 1
            return abs(self._end - self._start) - count

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        for x in it():
            return x
        return None

    def last(self):
        # first element of the reversed direction is the last of this one
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        for x in it():
            return x
        return None

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s %d:%d>' % (type(self).__name__, d,
                                 self._start, self._end - 1)
3568 3597
class fullreposet(spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization and magic to handle virtual
    revisions such as "null".
    """

    def __init__(self, repo):
        # span over the whole repository (start=0, end=len(repo))
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, all of the other set should also be
        in self. Therefore `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        # other is not a smartset, make it so
        if not util.safehasattr(other, 'isascending'):
            # filter out hidden revision
            # (this boldly assumes all smartset are pure)
            #
            # `other` was used with "&", let's assume this is a set like
            # object.
            other = baseset(other - self._hiddenrevs)

        # XXX As fullreposet is also used as bootstrap, this is wrong.
        #
        # With a giveme312() revset returning [3,1,2], this makes
        #   'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
        # We cannot just drop it because other usage still need to sort it:
        #   'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
        #
        # There is also some faulty revset implementations that rely on it
        # (eg: children as of its state in e8075329c5fb)
        #
        # When we fix the two points above we can move this into the if clause
        other.sort(reverse=self.isdescending())
        return other
3607 3636
3608 3637 def prettyformatset(revs):
3609 3638 lines = []
3610 3639 rs = repr(revs)
3611 3640 p = 0
3612 3641 while p < len(rs):
3613 3642 q = rs.find('<', p + 1)
3614 3643 if q < 0:
3615 3644 q = len(rs)
3616 3645 l = rs.count('<', 0, p) - rs.count('>', 0, p)
3617 3646 assert l >= 0
3618 3647 lines.append((l, rs[p:q].rstrip()))
3619 3648 p = q
3620 3649 return '\n'.join(' ' * l + s for l, s in lines)
3621 3650
def loadpredicate(ui, extname, registrarobj):
    """Load revset predicates from specified registrarobj
    """
    table = registrarobj._table
    for name in table:
        func = table[name]
        symbols[name] = func
        # predicates flagged _safe may be exposed over untrusted channels
        if func._safe:
            safesymbols.add(name)
3629 3658
# load built-in predicates explicitly to setup safesymbols
# (import-time side effect: populates the symbols/safesymbols tables)
loadpredicate(None, None, predicate)

# tell hggettext to extract docstrings from these functions:
i18nfunctions = symbols.values()
@@ -1,101 +1,101
1 1 This test file aims at testing topological iteration and the various configurations it can have.
2 2
3 3 $ cat >> $HGRCPATH << EOF
4 4 > [ui]
5 5 > logtemplate={rev}\n
6 6 > EOF
7 7
8 8 In this simple example, all topological branches are displayed in turn until we
9 9 can finally display 0. This implies skipping from 8 to 3 and coming back to 7
10 10 later.
11 11
12 12 $ hg init test01
13 13 $ cd test01
14 14 $ hg unbundle $TESTDIR/bundles/remote.hg
15 15 adding changesets
16 16 adding manifests
17 17 adding file changes
18 18 added 9 changesets with 7 changes to 4 files (+1 heads)
19 19 (run 'hg heads' to see heads, 'hg merge' to merge)
20 20
21 21 $ hg log -G
22 22 o 8
23 23 |
24 24 | o 7
25 25 | |
26 26 | o 6
27 27 | |
28 28 | o 5
29 29 | |
30 30 | o 4
31 31 | |
32 32 o | 3
33 33 | |
34 34 o | 2
35 35 | |
36 36 o | 1
37 37 |/
38 38 o 0
39 39
40 40
41 41 (display all nodes)
42 42
43 $ hg --config experimental.graph-group-branches=1 log -G
43 $ hg log -G -r 'sort(all(), topo)'
44 44 o 8
45 45 |
46 46 o 3
47 47 |
48 48 o 2
49 49 |
50 50 o 1
51 51 |
52 52 | o 7
53 53 | |
54 54 | o 6
55 55 | |
56 56 | o 5
57 57 | |
58 58 | o 4
59 59 |/
60 60 o 0
61 61
62 62
63 63 (revset skipping nodes)
64 64
65 $ hg --config experimental.graph-group-branches=1 log -G --rev 'not (2+6)'
65 $ hg log -G --rev 'sort(not (2+6), topo)'
66 66 o 8
67 67 |
68 68 o 3
69 69 :
70 70 o 1
71 71 |
72 72 | o 7
73 73 | :
74 74 | o 5
75 75 | |
76 76 | o 4
77 77 |/
78 78 o 0
79 79
80 80
81 81 (begin) from the other branch
82 82
83 $ hg --config experimental.graph-group-branches=1 --config experimental.graph-group-branches.firstbranch=5 log -G
83 $ hg log -G -r 'sort(all(), topo, topo.firstbranch=5)'
84 84 o 7
85 85 |
86 86 o 6
87 87 |
88 88 o 5
89 89 |
90 90 o 4
91 91 |
92 92 | o 8
93 93 | |
94 94 | o 3
95 95 | |
96 96 | o 2
97 97 | |
98 98 | o 1
99 99 |/
100 100 o 0
101 101
@@ -1,2499 +1,2560
1 1 $ HGENCODING=utf-8
2 2 $ export HGENCODING
3 3 $ cat > testrevset.py << EOF
4 4 > import mercurial.revset
5 5 >
6 6 > baseset = mercurial.revset.baseset
7 7 >
8 8 > def r3232(repo, subset, x):
9 9 > """"simple revset that return [3,2,3,2]
10 10 >
11 11 > revisions duplicated on purpose.
12 12 > """
13 13 > if 3 not in subset:
14 14 > if 2 in subset:
15 15 > return baseset([2,2])
16 16 > return baseset()
17 17 > return baseset([3,3,2,2])
18 18 >
19 19 > mercurial.revset.symbols['r3232'] = r3232
20 20 > EOF
21 21 $ cat >> $HGRCPATH << EOF
22 22 > [extensions]
23 23 > testrevset=$TESTTMP/testrevset.py
24 24 > EOF
25 25
26 26 $ try() {
27 27 > hg debugrevspec --debug "$@"
28 28 > }
29 29
30 30 $ log() {
31 31 > hg log --template '{rev}\n' -r "$1"
32 32 > }
33 33
34 34 $ hg init repo
35 35 $ cd repo
36 36
37 37 $ echo a > a
38 38 $ hg branch a
39 39 marked working directory as branch a
40 40 (branches are permanent and global, did you want a bookmark?)
41 41 $ hg ci -Aqm0
42 42
43 43 $ echo b > b
44 44 $ hg branch b
45 45 marked working directory as branch b
46 46 $ hg ci -Aqm1
47 47
48 48 $ rm a
49 49 $ hg branch a-b-c-
50 50 marked working directory as branch a-b-c-
51 51 $ hg ci -Aqm2 -u Bob
52 52
53 53 $ hg log -r "extra('branch', 'a-b-c-')" --template '{rev}\n'
54 54 2
55 55 $ hg log -r "extra('branch')" --template '{rev}\n'
56 56 0
57 57 1
58 58 2
59 59 $ hg log -r "extra('branch', 're:a')" --template '{rev} {branch}\n'
60 60 0 a
61 61 2 a-b-c-
62 62
63 63 $ hg co 1
64 64 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
65 65 $ hg branch +a+b+c+
66 66 marked working directory as branch +a+b+c+
67 67 $ hg ci -Aqm3
68 68
69 69 $ hg co 2 # interleave
70 70 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
71 71 $ echo bb > b
72 72 $ hg branch -- -a-b-c-
73 73 marked working directory as branch -a-b-c-
74 74 $ hg ci -Aqm4 -d "May 12 2005"
75 75
76 76 $ hg co 3
77 77 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
78 78 $ hg branch !a/b/c/
79 79 marked working directory as branch !a/b/c/
80 80 $ hg ci -Aqm"5 bug"
81 81
82 82 $ hg merge 4
83 83 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
84 84 (branch merge, don't forget to commit)
85 85 $ hg branch _a_b_c_
86 86 marked working directory as branch _a_b_c_
87 87 $ hg ci -Aqm"6 issue619"
88 88
89 89 $ hg branch .a.b.c.
90 90 marked working directory as branch .a.b.c.
91 91 $ hg ci -Aqm7
92 92
93 93 $ hg branch all
94 94 marked working directory as branch all
95 95
96 96 $ hg co 4
97 97 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
98 98 $ hg branch Γ©
99 99 marked working directory as branch \xc3\xa9 (esc)
100 100 $ hg ci -Aqm9
101 101
102 102 $ hg tag -r6 1.0
103 103 $ hg bookmark -r6 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
104 104
105 105 $ hg clone --quiet -U -r 7 . ../remote1
106 106 $ hg clone --quiet -U -r 8 . ../remote2
107 107 $ echo "[paths]" >> .hg/hgrc
108 108 $ echo "default = ../remote1" >> .hg/hgrc
109 109
110 110 trivial
111 111
112 112 $ try 0:1
113 113 (range
114 114 ('symbol', '0')
115 115 ('symbol', '1'))
116 116 * set:
117 117 <spanset+ 0:1>
118 118 0
119 119 1
120 120 $ try --optimize :
121 121 (rangeall
122 122 None)
123 123 * optimized:
124 124 (range
125 125 ('string', '0')
126 126 ('string', 'tip'))
127 127 * set:
128 128 <spanset+ 0:9>
129 129 0
130 130 1
131 131 2
132 132 3
133 133 4
134 134 5
135 135 6
136 136 7
137 137 8
138 138 9
139 139 $ try 3::6
140 140 (dagrange
141 141 ('symbol', '3')
142 142 ('symbol', '6'))
143 143 * set:
144 144 <baseset+ [3, 5, 6]>
145 145 3
146 146 5
147 147 6
148 148 $ try '0|1|2'
149 149 (or
150 150 ('symbol', '0')
151 151 ('symbol', '1')
152 152 ('symbol', '2'))
153 153 * set:
154 154 <baseset [0, 1, 2]>
155 155 0
156 156 1
157 157 2
158 158
159 159 names that should work without quoting
160 160
161 161 $ try a
162 162 ('symbol', 'a')
163 163 * set:
164 164 <baseset [0]>
165 165 0
166 166 $ try b-a
167 167 (minus
168 168 ('symbol', 'b')
169 169 ('symbol', 'a'))
170 170 * set:
171 171 <filteredset
172 172 <baseset [1]>,
173 173 <not
174 174 <baseset [0]>>>
175 175 1
176 176 $ try _a_b_c_
177 177 ('symbol', '_a_b_c_')
178 178 * set:
179 179 <baseset [6]>
180 180 6
181 181 $ try _a_b_c_-a
182 182 (minus
183 183 ('symbol', '_a_b_c_')
184 184 ('symbol', 'a'))
185 185 * set:
186 186 <filteredset
187 187 <baseset [6]>,
188 188 <not
189 189 <baseset [0]>>>
190 190 6
191 191 $ try .a.b.c.
192 192 ('symbol', '.a.b.c.')
193 193 * set:
194 194 <baseset [7]>
195 195 7
196 196 $ try .a.b.c.-a
197 197 (minus
198 198 ('symbol', '.a.b.c.')
199 199 ('symbol', 'a'))
200 200 * set:
201 201 <filteredset
202 202 <baseset [7]>,
203 203 <not
204 204 <baseset [0]>>>
205 205 7
206 206
207 207 names that should be caught by fallback mechanism
208 208
209 209 $ try -- '-a-b-c-'
210 210 ('symbol', '-a-b-c-')
211 211 * set:
212 212 <baseset [4]>
213 213 4
214 214 $ log -a-b-c-
215 215 4
216 216 $ try '+a+b+c+'
217 217 ('symbol', '+a+b+c+')
218 218 * set:
219 219 <baseset [3]>
220 220 3
221 221 $ try '+a+b+c+:'
222 222 (rangepost
223 223 ('symbol', '+a+b+c+'))
224 224 * set:
225 225 <spanset+ 3:9>
226 226 3
227 227 4
228 228 5
229 229 6
230 230 7
231 231 8
232 232 9
233 233 $ try ':+a+b+c+'
234 234 (rangepre
235 235 ('symbol', '+a+b+c+'))
236 236 * set:
237 237 <spanset+ 0:3>
238 238 0
239 239 1
240 240 2
241 241 3
242 242 $ try -- '-a-b-c-:+a+b+c+'
243 243 (range
244 244 ('symbol', '-a-b-c-')
245 245 ('symbol', '+a+b+c+'))
246 246 * set:
247 247 <spanset- 3:4>
248 248 4
249 249 3
250 250 $ log '-a-b-c-:+a+b+c+'
251 251 4
252 252 3
253 253
254 254 $ try -- -a-b-c--a # complains
255 255 (minus
256 256 (minus
257 257 (minus
258 258 (negate
259 259 ('symbol', 'a'))
260 260 ('symbol', 'b'))
261 261 ('symbol', 'c'))
262 262 (negate
263 263 ('symbol', 'a')))
264 264 abort: unknown revision '-a'!
265 265 [255]
266 266 $ try Γ©
267 267 ('symbol', '\xc3\xa9')
268 268 * set:
269 269 <baseset [9]>
270 270 9
271 271
272 272 no quoting needed
273 273
274 274 $ log ::a-b-c-
275 275 0
276 276 1
277 277 2
278 278
279 279 quoting needed
280 280
281 281 $ try '"-a-b-c-"-a'
282 282 (minus
283 283 ('string', '-a-b-c-')
284 284 ('symbol', 'a'))
285 285 * set:
286 286 <filteredset
287 287 <baseset [4]>,
288 288 <not
289 289 <baseset [0]>>>
290 290 4
291 291
292 292 $ log '1 or 2'
293 293 1
294 294 2
295 295 $ log '1|2'
296 296 1
297 297 2
298 298 $ log '1 and 2'
299 299 $ log '1&2'
300 300 $ try '1&2|3' # precedence - and is higher
301 301 (or
302 302 (and
303 303 ('symbol', '1')
304 304 ('symbol', '2'))
305 305 ('symbol', '3'))
306 306 * set:
307 307 <addset
308 308 <baseset []>,
309 309 <baseset [3]>>
310 310 3
311 311 $ try '1|2&3'
312 312 (or
313 313 ('symbol', '1')
314 314 (and
315 315 ('symbol', '2')
316 316 ('symbol', '3')))
317 317 * set:
318 318 <addset
319 319 <baseset [1]>,
320 320 <baseset []>>
321 321 1
322 322 $ try '1&2&3' # associativity
323 323 (and
324 324 (and
325 325 ('symbol', '1')
326 326 ('symbol', '2'))
327 327 ('symbol', '3'))
328 328 * set:
329 329 <baseset []>
330 330 $ try '1|(2|3)'
331 331 (or
332 332 ('symbol', '1')
333 333 (group
334 334 (or
335 335 ('symbol', '2')
336 336 ('symbol', '3'))))
337 337 * set:
338 338 <addset
339 339 <baseset [1]>,
340 340 <baseset [2, 3]>>
341 341 1
342 342 2
343 343 3
344 344 $ log '1.0' # tag
345 345 6
346 346 $ log 'a' # branch
347 347 0
348 348 $ log '2785f51ee'
349 349 0
350 350 $ log 'date(2005)'
351 351 4
352 352 $ log 'date(this is a test)'
353 353 hg: parse error at 10: unexpected token: symbol
354 354 [255]
355 355 $ log 'date()'
356 356 hg: parse error: date requires a string
357 357 [255]
358 358 $ log 'date'
359 359 abort: unknown revision 'date'!
360 360 [255]
361 361 $ log 'date('
362 362 hg: parse error at 5: not a prefix: end
363 363 [255]
364 364 $ log 'date("\xy")'
365 365 hg: parse error: invalid \x escape
366 366 [255]
367 367 $ log 'date(tip)'
368 368 abort: invalid date: 'tip'
369 369 [255]
370 370 $ log '0:date'
371 371 abort: unknown revision 'date'!
372 372 [255]
373 373 $ log '::"date"'
374 374 abort: unknown revision 'date'!
375 375 [255]
376 376 $ hg book date -r 4
377 377 $ log '0:date'
378 378 0
379 379 1
380 380 2
381 381 3
382 382 4
383 383 $ log '::date'
384 384 0
385 385 1
386 386 2
387 387 4
388 388 $ log '::"date"'
389 389 0
390 390 1
391 391 2
392 392 4
393 393 $ log 'date(2005) and 1::'
394 394 4
395 395 $ hg book -d date
396 396
397 397 keyword arguments
398 398
399 399 $ log 'extra(branch, value=a)'
400 400 0
401 401
402 402 $ log 'extra(branch, a, b)'
403 403 hg: parse error: extra takes at most 2 arguments
404 404 [255]
405 405 $ log 'extra(a, label=b)'
406 406 hg: parse error: extra got multiple values for keyword argument 'label'
407 407 [255]
408 408 $ log 'extra(label=branch, default)'
409 409 hg: parse error: extra got an invalid argument
410 410 [255]
411 411 $ log 'extra(branch, foo+bar=baz)'
412 412 hg: parse error: extra got an invalid argument
413 413 [255]
414 414 $ log 'extra(unknown=branch)'
415 415 hg: parse error: extra got an unexpected keyword argument 'unknown'
416 416 [255]
417 417
418 418 $ try 'foo=bar|baz'
419 419 (keyvalue
420 420 ('symbol', 'foo')
421 421 (or
422 422 ('symbol', 'bar')
423 423 ('symbol', 'baz')))
424 424 hg: parse error: can't use a key-value pair in this context
425 425 [255]
426 426
427 427 Test that symbols only get parsed as functions if there's an opening
428 428 parenthesis.
429 429
430 430 $ hg book only -r 9
431 431 $ log 'only(only)' # Outer "only" is a function, inner "only" is the bookmark
432 432 8
433 433 9
434 434
435 435 ancestor can accept 0 or more arguments
436 436
437 437 $ log 'ancestor()'
438 438 $ log 'ancestor(1)'
439 439 1
440 440 $ log 'ancestor(4,5)'
441 441 1
442 442 $ log 'ancestor(4,5) and 4'
443 443 $ log 'ancestor(0,0,1,3)'
444 444 0
445 445 $ log 'ancestor(3,1,5,3,5,1)'
446 446 1
447 447 $ log 'ancestor(0,1,3,5)'
448 448 0
449 449 $ log 'ancestor(1,2,3,4,5)'
450 450 1
451 451
452 452 test ancestors
453 453
454 454 $ log 'ancestors(5)'
455 455 0
456 456 1
457 457 3
458 458 5
459 459 $ log 'ancestor(ancestors(5))'
460 460 0
461 461 $ log '::r3232()'
462 462 0
463 463 1
464 464 2
465 465 3
466 466
467 467 $ log 'author(bob)'
468 468 2
469 469 $ log 'author("re:bob|test")'
470 470 0
471 471 1
472 472 2
473 473 3
474 474 4
475 475 5
476 476 6
477 477 7
478 478 8
479 479 9
480 480 $ log 'branch(Γ©)'
481 481 8
482 482 9
483 483 $ log 'branch(a)'
484 484 0
485 485 $ hg log -r 'branch("re:a")' --template '{rev} {branch}\n'
486 486 0 a
487 487 2 a-b-c-
488 488 3 +a+b+c+
489 489 4 -a-b-c-
490 490 5 !a/b/c/
491 491 6 _a_b_c_
492 492 7 .a.b.c.
493 493 $ log 'children(ancestor(4,5))'
494 494 2
495 495 3
496 496 $ log 'closed()'
497 497 $ log 'contains(a)'
498 498 0
499 499 1
500 500 3
501 501 5
502 502 $ log 'contains("../repo/a")'
503 503 0
504 504 1
505 505 3
506 506 5
507 507 $ log 'desc(B)'
508 508 5
509 509 $ log 'descendants(2 or 3)'
510 510 2
511 511 3
512 512 4
513 513 5
514 514 6
515 515 7
516 516 8
517 517 9
518 518 $ log 'file("b*")'
519 519 1
520 520 4
521 521 $ log 'filelog("b")'
522 522 1
523 523 4
524 524 $ log 'filelog("../repo/b")'
525 525 1
526 526 4
527 527 $ log 'follow()'
528 528 0
529 529 1
530 530 2
531 531 4
532 532 8
533 533 9
534 534 $ log 'grep("issue\d+")'
535 535 6
536 536 $ try 'grep("(")' # invalid regular expression
537 537 (func
538 538 ('symbol', 'grep')
539 539 ('string', '('))
540 540 hg: parse error: invalid match pattern: unbalanced parenthesis
541 541 [255]
542 542 $ try 'grep("\bissue\d+")'
543 543 (func
544 544 ('symbol', 'grep')
545 545 ('string', '\x08issue\\d+'))
546 546 * set:
547 547 <filteredset
548 548 <fullreposet+ 0:9>,
549 549 <grep '\x08issue\\d+'>>
550 550 $ try 'grep(r"\bissue\d+")'
551 551 (func
552 552 ('symbol', 'grep')
553 553 ('string', '\\bissue\\d+'))
554 554 * set:
555 555 <filteredset
556 556 <fullreposet+ 0:9>,
557 557 <grep '\\bissue\\d+'>>
558 558 6
559 559 $ try 'grep(r"\")'
560 560 hg: parse error at 7: unterminated string
561 561 [255]
562 562 $ log 'head()'
563 563 0
564 564 1
565 565 2
566 566 3
567 567 4
568 568 5
569 569 6
570 570 7
571 571 9
572 572 $ log 'heads(6::)'
573 573 7
574 574 $ log 'keyword(issue)'
575 575 6
576 576 $ log 'keyword("test a")'
577 577 $ log 'limit(head(), 1)'
578 578 0
579 579 $ log 'limit(author("re:bob|test"), 3, 5)'
580 580 5
581 581 6
582 582 7
583 583 $ log 'limit(author("re:bob|test"), offset=6)'
584 584 6
585 585 $ log 'limit(author("re:bob|test"), offset=10)'
586 586 $ log 'limit(all(), 1, -1)'
587 587 hg: parse error: negative offset
588 588 [255]
589 589 $ log 'matching(6)'
590 590 6
591 591 $ log 'matching(6:7, "phase parents user date branch summary files description substate")'
592 592 6
593 593 7
594 594
595 595 Testing min and max
596 596
597 597 max: simple
598 598
599 599 $ log 'max(contains(a))'
600 600 5
601 601
602 602 max: simple on unordered set)
603 603
604 604 $ log 'max((4+0+2+5+7) and contains(a))'
605 605 5
606 606
607 607 max: no result
608 608
609 609 $ log 'max(contains(stringthatdoesnotappearanywhere))'
610 610
611 611 max: no result on unordered set
612 612
613 613 $ log 'max((4+0+2+5+7) and contains(stringthatdoesnotappearanywhere))'
614 614
615 615 min: simple
616 616
617 617 $ log 'min(contains(a))'
618 618 0
619 619
620 620 min: simple on unordered set
621 621
622 622 $ log 'min((4+0+2+5+7) and contains(a))'
623 623 0
624 624
625 625 min: empty
626 626
627 627 $ log 'min(contains(stringthatdoesnotappearanywhere))'
628 628
629 629 min: empty on unordered set
630 630
631 631 $ log 'min((4+0+2+5+7) and contains(stringthatdoesnotappearanywhere))'
632 632
633 633
634 634 $ log 'merge()'
635 635 6
636 636 $ log 'branchpoint()'
637 637 1
638 638 4
639 639 $ log 'modifies(b)'
640 640 4
641 641 $ log 'modifies("path:b")'
642 642 4
643 643 $ log 'modifies("*")'
644 644 4
645 645 6
646 646 $ log 'modifies("set:modified()")'
647 647 4
648 648 $ log 'id(5)'
649 649 2
650 650 $ log 'only(9)'
651 651 8
652 652 9
653 653 $ log 'only(8)'
654 654 8
655 655 $ log 'only(9, 5)'
656 656 2
657 657 4
658 658 8
659 659 9
660 660 $ log 'only(7 + 9, 5 + 2)'
661 661 4
662 662 6
663 663 7
664 664 8
665 665 9
666 666
667 667 Test empty set input
668 668 $ log 'only(p2())'
669 669 $ log 'only(p1(), p2())'
670 670 0
671 671 1
672 672 2
673 673 4
674 674 8
675 675 9
676 676
677 677 Test '%' operator
678 678
679 679 $ log '9%'
680 680 8
681 681 9
682 682 $ log '9%5'
683 683 2
684 684 4
685 685 8
686 686 9
687 687 $ log '(7 + 9)%(5 + 2)'
688 688 4
689 689 6
690 690 7
691 691 8
692 692 9
693 693
694 694 Test opreand of '%' is optimized recursively (issue4670)
695 695
696 696 $ try --optimize '8:9-8%'
697 697 (onlypost
698 698 (minus
699 699 (range
700 700 ('symbol', '8')
701 701 ('symbol', '9'))
702 702 ('symbol', '8')))
703 703 * optimized:
704 704 (func
705 705 ('symbol', 'only')
706 706 (difference
707 707 (range
708 708 ('symbol', '8')
709 709 ('symbol', '9'))
710 710 ('symbol', '8')))
711 711 * set:
712 712 <baseset+ [8, 9]>
713 713 8
714 714 9
715 715 $ try --optimize '(9)%(5)'
716 716 (only
717 717 (group
718 718 ('symbol', '9'))
719 719 (group
720 720 ('symbol', '5')))
721 721 * optimized:
722 722 (func
723 723 ('symbol', 'only')
724 724 (list
725 725 ('symbol', '9')
726 726 ('symbol', '5')))
727 727 * set:
728 728 <baseset+ [2, 4, 8, 9]>
729 729 2
730 730 4
731 731 8
732 732 9
733 733
734 734 Test the order of operations
735 735
736 736 $ log '7 + 9%5 + 2'
737 737 7
738 738 2
739 739 4
740 740 8
741 741 9
742 742
743 743 Test explicit numeric revision
744 744 $ log 'rev(-2)'
745 745 $ log 'rev(-1)'
746 746 -1
747 747 $ log 'rev(0)'
748 748 0
749 749 $ log 'rev(9)'
750 750 9
751 751 $ log 'rev(10)'
752 752 $ log 'rev(tip)'
753 753 hg: parse error: rev expects a number
754 754 [255]
755 755
756 756 Test hexadecimal revision
757 757 $ log 'id(2)'
758 758 abort: 00changelog.i@2: ambiguous identifier!
759 759 [255]
760 760 $ log 'id(23268)'
761 761 4
762 762 $ log 'id(2785f51eece)'
763 763 0
764 764 $ log 'id(d5d0dcbdc4d9ff5dbb2d336f32f0bb561c1a532c)'
765 765 8
766 766 $ log 'id(d5d0dcbdc4a)'
767 767 $ log 'id(d5d0dcbdc4w)'
768 768 $ log 'id(d5d0dcbdc4d9ff5dbb2d336f32f0bb561c1a532d)'
769 769 $ log 'id(d5d0dcbdc4d9ff5dbb2d336f32f0bb561c1a532q)'
770 770 $ log 'id(1.0)'
771 771 $ log 'id(xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx)'
772 772
773 773 Test null revision
774 774 $ log '(null)'
775 775 -1
776 776 $ log '(null:0)'
777 777 -1
778 778 0
779 779 $ log '(0:null)'
780 780 0
781 781 -1
782 782 $ log 'null::0'
783 783 -1
784 784 0
785 785 $ log 'null:tip - 0:'
786 786 -1
787 787 $ log 'null: and null::' | head -1
788 788 -1
789 789 $ log 'null: or 0:' | head -2
790 790 -1
791 791 0
792 792 $ log 'ancestors(null)'
793 793 -1
794 794 $ log 'reverse(null:)' | tail -2
795 795 0
796 796 -1
797 797 BROKEN: should be '-1'
798 798 $ log 'first(null:)'
799 799 BROKEN: should be '-1'
800 800 $ log 'min(null:)'
801 801 $ log 'tip:null and all()' | tail -2
802 802 1
803 803 0
804 804
805 805 Test working-directory revision
806 806 $ hg debugrevspec 'wdir()'
807 807 2147483647
808 808 $ hg debugrevspec 'tip or wdir()'
809 809 9
810 810 2147483647
811 811 $ hg debugrevspec '0:tip and wdir()'
812 812 $ log '0:wdir()' | tail -3
813 813 8
814 814 9
815 815 2147483647
816 816 $ log 'wdir():0' | head -3
817 817 2147483647
818 818 9
819 819 8
820 820 $ log 'wdir():wdir()'
821 821 2147483647
822 822 $ log '(all() + wdir()) & min(. + wdir())'
823 823 9
824 824 $ log '(all() + wdir()) & max(. + wdir())'
825 825 2147483647
826 826 $ log '(all() + wdir()) & first(wdir() + .)'
827 827 2147483647
828 828 $ log '(all() + wdir()) & last(. + wdir())'
829 829 2147483647
830 830
831 831 $ log 'outgoing()'
832 832 8
833 833 9
834 834 $ log 'outgoing("../remote1")'
835 835 8
836 836 9
837 837 $ log 'outgoing("../remote2")'
838 838 3
839 839 5
840 840 6
841 841 7
842 842 9
843 843 $ log 'p1(merge())'
844 844 5
845 845 $ log 'p2(merge())'
846 846 4
847 847 $ log 'parents(merge())'
848 848 4
849 849 5
850 850 $ log 'p1(branchpoint())'
851 851 0
852 852 2
853 853 $ log 'p2(branchpoint())'
854 854 $ log 'parents(branchpoint())'
855 855 0
856 856 2
857 857 $ log 'removes(a)'
858 858 2
859 859 6
860 860 $ log 'roots(all())'
861 861 0
862 862 $ log 'reverse(2 or 3 or 4 or 5)'
863 863 5
864 864 4
865 865 3
866 866 2
867 867 $ log 'reverse(all())'
868 868 9
869 869 8
870 870 7
871 871 6
872 872 5
873 873 4
874 874 3
875 875 2
876 876 1
877 877 0
878 878 $ log 'reverse(all()) & filelog(b)'
879 879 4
880 880 1
881 881 $ log 'rev(5)'
882 882 5
883 883 $ log 'sort(limit(reverse(all()), 3))'
884 884 7
885 885 8
886 886 9
887 887 $ log 'sort(2 or 3 or 4 or 5, date)'
888 888 2
889 889 3
890 890 5
891 891 4
892 892 $ log 'tagged()'
893 893 6
894 894 $ log 'tag()'
895 895 6
896 896 $ log 'tag(1.0)'
897 897 6
898 898 $ log 'tag(tip)'
899 899 9
900 900
901 901 Test order of revisions in compound expression
902 902 ----------------------------------------------
903 903
904 904 'A & B' should follow the order of 'A':
905 905
906 906 $ log '2:0 & 0::2'
907 907 2
908 908 1
909 909 0
910 910
911 911 test sort revset
912 912 --------------------------------------------
913 913
914 914 test when adding two unordered revsets
915 915
916 916 $ log 'sort(keyword(issue) or modifies(b))'
917 917 4
918 918 6
919 919
920 920 test when sorting a reversed collection in the same way it is
921 921
922 922 $ log 'sort(reverse(all()), -rev)'
923 923 9
924 924 8
925 925 7
926 926 6
927 927 5
928 928 4
929 929 3
930 930 2
931 931 1
932 932 0
933 933
934 934 test when sorting a reversed collection
935 935
936 936 $ log 'sort(reverse(all()), rev)'
937 937 0
938 938 1
939 939 2
940 940 3
941 941 4
942 942 5
943 943 6
944 944 7
945 945 8
946 946 9
947 947
948 948
949 949 test sorting two sorted collections in different orders
950 950
951 951 $ log 'sort(outgoing() or reverse(removes(a)), rev)'
952 952 2
953 953 6
954 954 8
955 955 9
956 956
957 957 test sorting two sorted collections in different orders backwards
958 958
959 959 $ log 'sort(outgoing() or reverse(removes(a)), -rev)'
960 960 9
961 961 8
962 962 6
963 963 2
964 964
965 965 test invalid sort keys
966 966
967 967 $ log 'sort(all(), -invalid)'
968 968 hg: parse error: unknown sort key '-invalid'
969 969 [255]
970 970
971 971 $ cd ..
972 972
973 973 test sorting by multiple keys including variable-length strings
974 974
975 975 $ hg init sorting
976 976 $ cd sorting
977 977 $ cat <<EOF >> .hg/hgrc
978 978 > [ui]
979 979 > logtemplate = '{rev} {branch|p5}{desc|p5}{author|p5}{date|hgdate}\n'
980 980 > [templatealias]
981 981 > p5(s) = pad(s, 5)
982 982 > EOF
983 983 $ hg branch -qf b12
984 984 $ hg ci -m m111 -u u112 -d '111 10800'
985 985 $ hg branch -qf b11
986 986 $ hg ci -m m12 -u u111 -d '112 7200'
987 987 $ hg branch -qf b111
988 988 $ hg ci -m m11 -u u12 -d '111 3600'
989 989 $ hg branch -qf b112
990 990 $ hg ci -m m111 -u u11 -d '120 0'
991 991 $ hg branch -qf b111
992 992 $ hg ci -m m112 -u u111 -d '110 14400'
993 993 created new head
994 994
995 995 compare revisions (has fast path):
996 996
997 997 $ hg log -r 'sort(all(), rev)'
998 998 0 b12 m111 u112 111 10800
999 999 1 b11 m12 u111 112 7200
1000 1000 2 b111 m11 u12 111 3600
1001 1001 3 b112 m111 u11 120 0
1002 1002 4 b111 m112 u111 110 14400
1003 1003
1004 1004 $ hg log -r 'sort(all(), -rev)'
1005 1005 4 b111 m112 u111 110 14400
1006 1006 3 b112 m111 u11 120 0
1007 1007 2 b111 m11 u12 111 3600
1008 1008 1 b11 m12 u111 112 7200
1009 1009 0 b12 m111 u112 111 10800
1010 1010
1011 1011 compare variable-length strings (issue5218):
1012 1012
1013 1013 $ hg log -r 'sort(all(), branch)'
1014 1014 1 b11 m12 u111 112 7200
1015 1015 2 b111 m11 u12 111 3600
1016 1016 4 b111 m112 u111 110 14400
1017 1017 3 b112 m111 u11 120 0
1018 1018 0 b12 m111 u112 111 10800
1019 1019
1020 1020 $ hg log -r 'sort(all(), -branch)'
1021 1021 0 b12 m111 u112 111 10800
1022 1022 3 b112 m111 u11 120 0
1023 1023 2 b111 m11 u12 111 3600
1024 1024 4 b111 m112 u111 110 14400
1025 1025 1 b11 m12 u111 112 7200
1026 1026
1027 1027 $ hg log -r 'sort(all(), desc)'
1028 1028 2 b111 m11 u12 111 3600
1029 1029 0 b12 m111 u112 111 10800
1030 1030 3 b112 m111 u11 120 0
1031 1031 4 b111 m112 u111 110 14400
1032 1032 1 b11 m12 u111 112 7200
1033 1033
1034 1034 $ hg log -r 'sort(all(), -desc)'
1035 1035 1 b11 m12 u111 112 7200
1036 1036 4 b111 m112 u111 110 14400
1037 1037 0 b12 m111 u112 111 10800
1038 1038 3 b112 m111 u11 120 0
1039 1039 2 b111 m11 u12 111 3600
1040 1040
1041 1041 $ hg log -r 'sort(all(), user)'
1042 1042 3 b112 m111 u11 120 0
1043 1043 1 b11 m12 u111 112 7200
1044 1044 4 b111 m112 u111 110 14400
1045 1045 0 b12 m111 u112 111 10800
1046 1046 2 b111 m11 u12 111 3600
1047 1047
1048 1048 $ hg log -r 'sort(all(), -user)'
1049 1049 2 b111 m11 u12 111 3600
1050 1050 0 b12 m111 u112 111 10800
1051 1051 1 b11 m12 u111 112 7200
1052 1052 4 b111 m112 u111 110 14400
1053 1053 3 b112 m111 u11 120 0
1054 1054
1055 1055 compare dates (tz offset should have no effect):
1056 1056
1057 1057 $ hg log -r 'sort(all(), date)'
1058 1058 4 b111 m112 u111 110 14400
1059 1059 0 b12 m111 u112 111 10800
1060 1060 2 b111 m11 u12 111 3600
1061 1061 1 b11 m12 u111 112 7200
1062 1062 3 b112 m111 u11 120 0
1063 1063
1064 1064 $ hg log -r 'sort(all(), -date)'
1065 1065 3 b112 m111 u11 120 0
1066 1066 1 b11 m12 u111 112 7200
1067 1067 0 b12 m111 u112 111 10800
1068 1068 2 b111 m11 u12 111 3600
1069 1069 4 b111 m112 u111 110 14400
1070 1070
1071 1071 be aware that 'sort(x, -k)' is not exactly the same as 'reverse(sort(x, k))'
1072 1072 because '-k' reverses the comparison, not the list itself:
1073 1073
1074 1074 $ hg log -r 'sort(0 + 2, date)'
1075 1075 0 b12 m111 u112 111 10800
1076 1076 2 b111 m11 u12 111 3600
1077 1077
1078 1078 $ hg log -r 'sort(0 + 2, -date)'
1079 1079 0 b12 m111 u112 111 10800
1080 1080 2 b111 m11 u12 111 3600
1081 1081
1082 1082 $ hg log -r 'reverse(sort(0 + 2, date))'
1083 1083 2 b111 m11 u12 111 3600
1084 1084 0 b12 m111 u112 111 10800
1085 1085
1086 1086 sort by multiple keys:
1087 1087
1088 1088 $ hg log -r 'sort(all(), "branch -rev")'
1089 1089 1 b11 m12 u111 112 7200
1090 1090 4 b111 m112 u111 110 14400
1091 1091 2 b111 m11 u12 111 3600
1092 1092 3 b112 m111 u11 120 0
1093 1093 0 b12 m111 u112 111 10800
1094 1094
1095 1095 $ hg log -r 'sort(all(), "-desc -date")'
1096 1096 1 b11 m12 u111 112 7200
1097 1097 4 b111 m112 u111 110 14400
1098 1098 3 b112 m111 u11 120 0
1099 1099 0 b12 m111 u112 111 10800
1100 1100 2 b111 m11 u12 111 3600
1101 1101
1102 1102 $ hg log -r 'sort(all(), "user -branch date rev")'
1103 1103 3 b112 m111 u11 120 0
1104 1104 4 b111 m112 u111 110 14400
1105 1105 1 b11 m12 u111 112 7200
1106 1106 0 b12 m111 u112 111 10800
1107 1107 2 b111 m11 u12 111 3600
1108 1108
1109 toposort prioritises graph branches
1110
1111 $ hg up 2
1112 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
1113 $ touch a
1114 $ hg addremove
1115 adding a
1116 $ hg ci -m 't1' -u 'tu' -d '130 0'
1117 created new head
1118 $ echo 'a' >> a
1119 $ hg ci -m 't2' -u 'tu' -d '130 0'
1120 $ hg book book1
1121 $ hg up 4
1122 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
1123 (leaving bookmark book1)
1124 $ touch a
1125 $ hg addremove
1126 adding a
1127 $ hg ci -m 't3' -u 'tu' -d '130 0'
1128
1129 $ hg log -r 'sort(all(), topo)'
1130 7 b111 t3 tu 130 0
1131 4 b111 m112 u111 110 14400
1132 3 b112 m111 u11 120 0
1133 6 b111 t2 tu 130 0
1134 5 b111 t1 tu 130 0
1135 2 b111 m11 u12 111 3600
1136 1 b11 m12 u111 112 7200
1137 0 b12 m111 u112 111 10800
1138
1139 $ hg log -r 'sort(all(), -topo)'
1140 0 b12 m111 u112 111 10800
1141 1 b11 m12 u111 112 7200
1142 2 b111 m11 u12 111 3600
1143 5 b111 t1 tu 130 0
1144 6 b111 t2 tu 130 0
1145 3 b112 m111 u11 120 0
1146 4 b111 m112 u111 110 14400
1147 7 b111 t3 tu 130 0
1148
1149 $ hg log -r 'sort(all(), topo, topo.firstbranch=book1)'
1150 6 b111 t2 tu 130 0
1151 5 b111 t1 tu 130 0
1152 7 b111 t3 tu 130 0
1153 4 b111 m112 u111 110 14400
1154 3 b112 m111 u11 120 0
1155 2 b111 m11 u12 111 3600
1156 1 b11 m12 u111 112 7200
1157 0 b12 m111 u112 111 10800
1158
1159 topographical sorting can't be combined with other sort keys, and you can't
1160 use the topo.firstbranch option when topo sort is not active:
1161
1162 $ hg log -r 'sort(all(), "topo user")'
1163 hg: parse error: topo sort order cannot be combined with other sort keys
1164 [255]
1165
1166 $ hg log -r 'sort(all(), user, topo.firstbranch=book1)'
1167 hg: parse error: topo.firstbranch can only be used when using the topo sort key
1168 [255]
1169
1109 1170 $ cd ..
1110 1171 $ cd repo
1111 1172
1112 1173 test subtracting something from an addset
1113 1174
1114 1175 $ log '(outgoing() or removes(a)) - removes(a)'
1115 1176 8
1116 1177 9
1117 1178
1118 1179 test intersecting something with an addset
1119 1180
1120 1181 $ log 'parents(outgoing() or removes(a))'
1121 1182 1
1122 1183 4
1123 1184 5
1124 1185 8
1125 1186
1126 1187 test that `or` operation combines elements in the right order:
1127 1188
1128 1189 $ log '3:4 or 2:5'
1129 1190 3
1130 1191 4
1131 1192 2
1132 1193 5
1133 1194 $ log '3:4 or 5:2'
1134 1195 3
1135 1196 4
1136 1197 5
1137 1198 2
1138 1199 $ log 'sort(3:4 or 2:5)'
1139 1200 2
1140 1201 3
1141 1202 4
1142 1203 5
1143 1204 $ log 'sort(3:4 or 5:2)'
1144 1205 2
1145 1206 3
1146 1207 4
1147 1208 5
1148 1209
1149 1210 test that more than one `-r`s are combined in the right order and deduplicated:
1150 1211
1151 1212 $ hg log -T '{rev}\n' -r 3 -r 3 -r 4 -r 5:2 -r 'ancestors(4)'
1152 1213 3
1153 1214 4
1154 1215 5
1155 1216 2
1156 1217 0
1157 1218 1
1158 1219
1159 1220 test that `or` operation skips duplicated revisions from right-hand side
1160 1221
1161 1222 $ try 'reverse(1::5) or ancestors(4)'
1162 1223 (or
1163 1224 (func
1164 1225 ('symbol', 'reverse')
1165 1226 (dagrange
1166 1227 ('symbol', '1')
1167 1228 ('symbol', '5')))
1168 1229 (func
1169 1230 ('symbol', 'ancestors')
1170 1231 ('symbol', '4')))
1171 1232 * set:
1172 1233 <addset
1173 1234 <baseset- [1, 3, 5]>,
1174 1235 <generatorset+>>
1175 1236 5
1176 1237 3
1177 1238 1
1178 1239 0
1179 1240 2
1180 1241 4
1181 1242 $ try 'sort(ancestors(4) or reverse(1::5))'
1182 1243 (func
1183 1244 ('symbol', 'sort')
1184 1245 (or
1185 1246 (func
1186 1247 ('symbol', 'ancestors')
1187 1248 ('symbol', '4'))
1188 1249 (func
1189 1250 ('symbol', 'reverse')
1190 1251 (dagrange
1191 1252 ('symbol', '1')
1192 1253 ('symbol', '5')))))
1193 1254 * set:
1194 1255 <addset+
1195 1256 <generatorset+>,
1196 1257 <baseset- [1, 3, 5]>>
1197 1258 0
1198 1259 1
1199 1260 2
1200 1261 3
1201 1262 4
1202 1263 5
1203 1264
1204 1265 test optimization of trivial `or` operation
1205 1266
1206 1267 $ try --optimize '0|(1)|"2"|-2|tip|null'
1207 1268 (or
1208 1269 ('symbol', '0')
1209 1270 (group
1210 1271 ('symbol', '1'))
1211 1272 ('string', '2')
1212 1273 (negate
1213 1274 ('symbol', '2'))
1214 1275 ('symbol', 'tip')
1215 1276 ('symbol', 'null'))
1216 1277 * optimized:
1217 1278 (func
1218 1279 ('symbol', '_list')
1219 1280 ('string', '0\x001\x002\x00-2\x00tip\x00null'))
1220 1281 * set:
1221 1282 <baseset [0, 1, 2, 8, 9, -1]>
1222 1283 0
1223 1284 1
1224 1285 2
1225 1286 8
1226 1287 9
1227 1288 -1
1228 1289
1229 1290 $ try --optimize '0|1|2:3'
1230 1291 (or
1231 1292 ('symbol', '0')
1232 1293 ('symbol', '1')
1233 1294 (range
1234 1295 ('symbol', '2')
1235 1296 ('symbol', '3')))
1236 1297 * optimized:
1237 1298 (or
1238 1299 (func
1239 1300 ('symbol', '_list')
1240 1301 ('string', '0\x001'))
1241 1302 (range
1242 1303 ('symbol', '2')
1243 1304 ('symbol', '3')))
1244 1305 * set:
1245 1306 <addset
1246 1307 <baseset [0, 1]>,
1247 1308 <spanset+ 2:3>>
1248 1309 0
1249 1310 1
1250 1311 2
1251 1312 3
1252 1313
1253 1314 $ try --optimize '0:1|2|3:4|5|6'
1254 1315 (or
1255 1316 (range
1256 1317 ('symbol', '0')
1257 1318 ('symbol', '1'))
1258 1319 ('symbol', '2')
1259 1320 (range
1260 1321 ('symbol', '3')
1261 1322 ('symbol', '4'))
1262 1323 ('symbol', '5')
1263 1324 ('symbol', '6'))
1264 1325 * optimized:
1265 1326 (or
1266 1327 (range
1267 1328 ('symbol', '0')
1268 1329 ('symbol', '1'))
1269 1330 ('symbol', '2')
1270 1331 (range
1271 1332 ('symbol', '3')
1272 1333 ('symbol', '4'))
1273 1334 (func
1274 1335 ('symbol', '_list')
1275 1336 ('string', '5\x006')))
1276 1337 * set:
1277 1338 <addset
1278 1339 <addset
1279 1340 <spanset+ 0:1>,
1280 1341 <baseset [2]>>,
1281 1342 <addset
1282 1343 <spanset+ 3:4>,
1283 1344 <baseset [5, 6]>>>
1284 1345 0
1285 1346 1
1286 1347 2
1287 1348 3
1288 1349 4
1289 1350 5
1290 1351 6
1291 1352
1292 1353 test that `_list` should be narrowed by provided `subset`
1293 1354
1294 1355 $ log '0:2 and (null|1|2|3)'
1295 1356 1
1296 1357 2
1297 1358
1298 1359 test that `_list` should remove duplicates
1299 1360
1300 1361 $ log '0|1|2|1|2|-1|tip'
1301 1362 0
1302 1363 1
1303 1364 2
1304 1365 9
1305 1366
1306 1367 test unknown revision in `_list`
1307 1368
1308 1369 $ log '0|unknown'
1309 1370 abort: unknown revision 'unknown'!
1310 1371 [255]
1311 1372
1312 1373 test integer range in `_list`
1313 1374
1314 1375 $ log '-1|-10'
1315 1376 9
1316 1377 0
1317 1378
1318 1379 $ log '-10|-11'
1319 1380 abort: unknown revision '-11'!
1320 1381 [255]
1321 1382
1322 1383 $ log '9|10'
1323 1384 abort: unknown revision '10'!
1324 1385 [255]
1325 1386
1326 1387 test '0000' != '0' in `_list`
1327 1388
1328 1389 $ log '0|0000'
1329 1390 0
1330 1391 -1
1331 1392
1332 1393 test ',' in `_list`
1333 1394 $ log '0,1'
1334 1395 hg: parse error: can't use a list in this context
1335 1396 (see hg help "revsets.x or y")
1336 1397 [255]
1337 1398 $ try '0,1,2'
1338 1399 (list
1339 1400 ('symbol', '0')
1340 1401 ('symbol', '1')
1341 1402 ('symbol', '2'))
1342 1403 hg: parse error: can't use a list in this context
1343 1404 (see hg help "revsets.x or y")
1344 1405 [255]
1345 1406
1346 1407 test that chained `or` operations make balanced addsets
1347 1408
1348 1409 $ try '0:1|1:2|2:3|3:4|4:5'
1349 1410 (or
1350 1411 (range
1351 1412 ('symbol', '0')
1352 1413 ('symbol', '1'))
1353 1414 (range
1354 1415 ('symbol', '1')
1355 1416 ('symbol', '2'))
1356 1417 (range
1357 1418 ('symbol', '2')
1358 1419 ('symbol', '3'))
1359 1420 (range
1360 1421 ('symbol', '3')
1361 1422 ('symbol', '4'))
1362 1423 (range
1363 1424 ('symbol', '4')
1364 1425 ('symbol', '5')))
1365 1426 * set:
1366 1427 <addset
1367 1428 <addset
1368 1429 <spanset+ 0:1>,
1369 1430 <spanset+ 1:2>>,
1370 1431 <addset
1371 1432 <spanset+ 2:3>,
1372 1433 <addset
1373 1434 <spanset+ 3:4>,
1374 1435 <spanset+ 4:5>>>>
1375 1436 0
1376 1437 1
1377 1438 2
1378 1439 3
1379 1440 4
1380 1441 5
1381 1442
1382 1443 no crash by empty group "()" while optimizing `or` operations
1383 1444
1384 1445 $ try --optimize '0|()'
1385 1446 (or
1386 1447 ('symbol', '0')
1387 1448 (group
1388 1449 None))
1389 1450 * optimized:
1390 1451 (or
1391 1452 ('symbol', '0')
1392 1453 None)
1393 1454 hg: parse error: missing argument
1394 1455 [255]
1395 1456
1396 1457 test that chained `or` operations never eat up stack (issue4624)
1397 1458 (uses `0:1` instead of `0` to avoid future optimization of trivial revisions)
1398 1459
1399 1460 $ hg log -T '{rev}\n' -r `python -c "print '+'.join(['0:1'] * 500)"`
1400 1461 0
1401 1462 1
1402 1463
1403 1464 test that repeated `-r` options never eat up stack (issue4565)
1404 1465 (uses `-r 0::1` to avoid possible optimization at old-style parser)
1405 1466
1406 1467 $ hg log -T '{rev}\n' `python -c "for i in xrange(500): print '-r 0::1 ',"`
1407 1468 0
1408 1469 1
1409 1470
1410 1471 check that conversion to only works
1411 1472 $ try --optimize '::3 - ::1'
1412 1473 (minus
1413 1474 (dagrangepre
1414 1475 ('symbol', '3'))
1415 1476 (dagrangepre
1416 1477 ('symbol', '1')))
1417 1478 * optimized:
1418 1479 (func
1419 1480 ('symbol', 'only')
1420 1481 (list
1421 1482 ('symbol', '3')
1422 1483 ('symbol', '1')))
1423 1484 * set:
1424 1485 <baseset+ [3]>
1425 1486 3
1426 1487 $ try --optimize 'ancestors(1) - ancestors(3)'
1427 1488 (minus
1428 1489 (func
1429 1490 ('symbol', 'ancestors')
1430 1491 ('symbol', '1'))
1431 1492 (func
1432 1493 ('symbol', 'ancestors')
1433 1494 ('symbol', '3')))
1434 1495 * optimized:
1435 1496 (func
1436 1497 ('symbol', 'only')
1437 1498 (list
1438 1499 ('symbol', '1')
1439 1500 ('symbol', '3')))
1440 1501 * set:
1441 1502 <baseset+ []>
1442 1503 $ try --optimize 'not ::2 and ::6'
1443 1504 (and
1444 1505 (not
1445 1506 (dagrangepre
1446 1507 ('symbol', '2')))
1447 1508 (dagrangepre
1448 1509 ('symbol', '6')))
1449 1510 * optimized:
1450 1511 (func
1451 1512 ('symbol', 'only')
1452 1513 (list
1453 1514 ('symbol', '6')
1454 1515 ('symbol', '2')))
1455 1516 * set:
1456 1517 <baseset+ [3, 4, 5, 6]>
1457 1518 3
1458 1519 4
1459 1520 5
1460 1521 6
1461 1522 $ try --optimize 'ancestors(6) and not ancestors(4)'
1462 1523 (and
1463 1524 (func
1464 1525 ('symbol', 'ancestors')
1465 1526 ('symbol', '6'))
1466 1527 (not
1467 1528 (func
1468 1529 ('symbol', 'ancestors')
1469 1530 ('symbol', '4'))))
1470 1531 * optimized:
1471 1532 (func
1472 1533 ('symbol', 'only')
1473 1534 (list
1474 1535 ('symbol', '6')
1475 1536 ('symbol', '4')))
1476 1537 * set:
1477 1538 <baseset+ [3, 5, 6]>
1478 1539 3
1479 1540 5
1480 1541 6
1481 1542
1482 1543 no crash by empty group "()" while optimizing to "only()"
1483 1544
1484 1545 $ try --optimize '::1 and ()'
1485 1546 (and
1486 1547 (dagrangepre
1487 1548 ('symbol', '1'))
1488 1549 (group
1489 1550 None))
1490 1551 * optimized:
1491 1552 (and
1492 1553 None
1493 1554 (func
1494 1555 ('symbol', 'ancestors')
1495 1556 ('symbol', '1')))
1496 1557 hg: parse error: missing argument
1497 1558 [255]
1498 1559
1499 1560 we can use patterns when searching for tags
1500 1561
1501 1562 $ log 'tag("1..*")'
1502 1563 abort: tag '1..*' does not exist!
1503 1564 [255]
1504 1565 $ log 'tag("re:1..*")'
1505 1566 6
1506 1567 $ log 'tag("re:[0-9].[0-9]")'
1507 1568 6
1508 1569 $ log 'tag("literal:1.0")'
1509 1570 6
1510 1571 $ log 'tag("re:0..*")'
1511 1572
1512 1573 $ log 'tag(unknown)'
1513 1574 abort: tag 'unknown' does not exist!
1514 1575 [255]
1515 1576 $ log 'tag("re:unknown")'
1516 1577 $ log 'present(tag("unknown"))'
1517 1578 $ log 'present(tag("re:unknown"))'
1518 1579 $ log 'branch(unknown)'
1519 1580 abort: unknown revision 'unknown'!
1520 1581 [255]
1521 1582 $ log 'branch("literal:unknown")'
1522 1583 abort: branch 'unknown' does not exist!
1523 1584 [255]
1524 1585 $ log 'branch("re:unknown")'
1525 1586 $ log 'present(branch("unknown"))'
1526 1587 $ log 'present(branch("re:unknown"))'
1527 1588 $ log 'user(bob)'
1528 1589 2
1529 1590
1530 1591 $ log '4::8'
1531 1592 4
1532 1593 8
1533 1594 $ log '4:8'
1534 1595 4
1535 1596 5
1536 1597 6
1537 1598 7
1538 1599 8
1539 1600
1540 1601 $ log 'sort(!merge() & (modifies(b) | user(bob) | keyword(bug) | keyword(issue) & 1::9), "-date")'
1541 1602 4
1542 1603 2
1543 1604 5
1544 1605
1545 1606 $ log 'not 0 and 0:2'
1546 1607 1
1547 1608 2
1548 1609 $ log 'not 1 and 0:2'
1549 1610 0
1550 1611 2
1551 1612 $ log 'not 2 and 0:2'
1552 1613 0
1553 1614 1
1554 1615 $ log '(1 and 2)::'
1555 1616 $ log '(1 and 2):'
1556 1617 $ log '(1 and 2):3'
1557 1618 $ log 'sort(head(), -rev)'
1558 1619 9
1559 1620 7
1560 1621 6
1561 1622 5
1562 1623 4
1563 1624 3
1564 1625 2
1565 1626 1
1566 1627 0
1567 1628 $ log '4::8 - 8'
1568 1629 4
1569 1630
1570 1631 matching() should preserve the order of the input set:
1571 1632
1572 1633 $ log '(2 or 3 or 1) and matching(1 or 2 or 3)'
1573 1634 2
1574 1635 3
1575 1636 1
1576 1637
1577 1638 $ log 'named("unknown")'
1578 1639 abort: namespace 'unknown' does not exist!
1579 1640 [255]
1580 1641 $ log 'named("re:unknown")'
1581 1642 abort: no namespace exists that match 'unknown'!
1582 1643 [255]
1583 1644 $ log 'present(named("unknown"))'
1584 1645 $ log 'present(named("re:unknown"))'
1585 1646
1586 1647 $ log 'tag()'
1587 1648 6
1588 1649 $ log 'named("tags")'
1589 1650 6
1590 1651
1591 1652 issue2437
1592 1653
1593 1654 $ log '3 and p1(5)'
1594 1655 3
1595 1656 $ log '4 and p2(6)'
1596 1657 4
1597 1658 $ log '1 and parents(:2)'
1598 1659 1
1599 1660 $ log '2 and children(1:)'
1600 1661 2
1601 1662 $ log 'roots(all()) or roots(all())'
1602 1663 0
1603 1664 $ hg debugrevspec 'roots(all()) or roots(all())'
1604 1665 0
1605 1666 $ log 'heads(branch(Γ©)) or heads(branch(Γ©))'
1606 1667 9
1607 1668 $ log 'ancestors(8) and (heads(branch("-a-b-c-")) or heads(branch(Γ©)))'
1608 1669 4
1609 1670
1610 1671 issue2654: report a parse error if the revset was not completely parsed
1611 1672
1612 1673 $ log '1 OR 2'
1613 1674 hg: parse error at 2: invalid token
1614 1675 [255]
1615 1676
1616 1677 or operator should preserve ordering:
1617 1678 $ log 'reverse(2::4) or tip'
1618 1679 4
1619 1680 2
1620 1681 9
1621 1682
1622 1683 parentrevspec
1623 1684
1624 1685 $ log 'merge()^0'
1625 1686 6
1626 1687 $ log 'merge()^'
1627 1688 5
1628 1689 $ log 'merge()^1'
1629 1690 5
1630 1691 $ log 'merge()^2'
1631 1692 4
1632 1693 $ log 'merge()^^'
1633 1694 3
1634 1695 $ log 'merge()^1^'
1635 1696 3
1636 1697 $ log 'merge()^^^'
1637 1698 1
1638 1699
1639 1700 $ log 'merge()~0'
1640 1701 6
1641 1702 $ log 'merge()~1'
1642 1703 5
1643 1704 $ log 'merge()~2'
1644 1705 3
1645 1706 $ log 'merge()~2^1'
1646 1707 1
1647 1708 $ log 'merge()~3'
1648 1709 1
1649 1710
1650 1711 $ log '(-3:tip)^'
1651 1712 4
1652 1713 6
1653 1714 8
1654 1715
1655 1716 $ log 'tip^foo'
1656 1717 hg: parse error: ^ expects a number 0, 1, or 2
1657 1718 [255]
1658 1719
1659 1720 Bogus function gets suggestions
1660 1721 $ log 'add()'
1661 1722 hg: parse error: unknown identifier: add
1662 1723 (did you mean adds?)
1663 1724 [255]
1664 1725 $ log 'added()'
1665 1726 hg: parse error: unknown identifier: added
1666 1727 (did you mean adds?)
1667 1728 [255]
1668 1729 $ log 'remo()'
1669 1730 hg: parse error: unknown identifier: remo
1670 1731 (did you mean one of remote, removes?)
1671 1732 [255]
1672 1733 $ log 'babar()'
1673 1734 hg: parse error: unknown identifier: babar
1674 1735 [255]
1675 1736
1676 1737 Bogus function with a similar internal name doesn't suggest the internal name
1677 1738 $ log 'matches()'
1678 1739 hg: parse error: unknown identifier: matches
1679 1740 (did you mean matching?)
1680 1741 [255]
1681 1742
1682 1743 Undocumented functions aren't suggested as similar either
1683 1744 $ log 'wdir2()'
1684 1745 hg: parse error: unknown identifier: wdir2
1685 1746 [255]
1686 1747
1687 1748 multiple revspecs
1688 1749
1689 1750 $ hg log -r 'tip~1:tip' -r 'tip~2:tip~1' --template '{rev}\n'
1690 1751 8
1691 1752 9
1692 1753 4
1693 1754 5
1694 1755 6
1695 1756 7
1696 1757
1697 1758 test usage in revpair (with "+")
1698 1759
1699 1760 (real pair)
1700 1761
1701 1762 $ hg diff -r 'tip^^' -r 'tip'
1702 1763 diff -r 2326846efdab -r 24286f4ae135 .hgtags
1703 1764 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1704 1765 +++ b/.hgtags Thu Jan 01 00:00:00 1970 +0000
1705 1766 @@ -0,0 +1,1 @@
1706 1767 +e0cc66ef77e8b6f711815af4e001a6594fde3ba5 1.0
1707 1768 $ hg diff -r 'tip^^::tip'
1708 1769 diff -r 2326846efdab -r 24286f4ae135 .hgtags
1709 1770 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1710 1771 +++ b/.hgtags Thu Jan 01 00:00:00 1970 +0000
1711 1772 @@ -0,0 +1,1 @@
1712 1773 +e0cc66ef77e8b6f711815af4e001a6594fde3ba5 1.0
1713 1774
1714 1775 (single rev)
1715 1776
1716 1777 $ hg diff -r 'tip^' -r 'tip^'
1717 1778 $ hg diff -r 'tip^:tip^'
1718 1779
1719 1780 (single rev that does not look like a range)
1720 1781
1721 1782 $ hg diff -r 'tip^::tip^ or tip^'
1722 1783 diff -r d5d0dcbdc4d9 .hgtags
1723 1784 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1724 1785 +++ b/.hgtags * (glob)
1725 1786 @@ -0,0 +1,1 @@
1726 1787 +e0cc66ef77e8b6f711815af4e001a6594fde3ba5 1.0
1727 1788 $ hg diff -r 'tip^ or tip^'
1728 1789 diff -r d5d0dcbdc4d9 .hgtags
1729 1790 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1730 1791 +++ b/.hgtags * (glob)
1731 1792 @@ -0,0 +1,1 @@
1732 1793 +e0cc66ef77e8b6f711815af4e001a6594fde3ba5 1.0
1733 1794
1734 1795 (no rev)
1735 1796
1736 1797 $ hg diff -r 'author("babar") or author("celeste")'
1737 1798 abort: empty revision range
1738 1799 [255]
1739 1800
1740 1801 aliases:
1741 1802
1742 1803 $ echo '[revsetalias]' >> .hg/hgrc
1743 1804 $ echo 'm = merge()' >> .hg/hgrc
1744 1805 (revset aliases can override builtin revsets)
1745 1806 $ echo 'p2($1) = p1($1)' >> .hg/hgrc
1746 1807 $ echo 'sincem = descendants(m)' >> .hg/hgrc
1747 1808 $ echo 'd($1) = reverse(sort($1, date))' >> .hg/hgrc
1748 1809 $ echo 'rs(ARG1, ARG2) = reverse(sort(ARG1, ARG2))' >> .hg/hgrc
1749 1810 $ echo 'rs4(ARG1, ARGA, ARGB, ARG2) = reverse(sort(ARG1, ARG2))' >> .hg/hgrc
1750 1811
1751 1812 $ try m
1752 1813 ('symbol', 'm')
1753 1814 * expanded:
1754 1815 (func
1755 1816 ('symbol', 'merge')
1756 1817 None)
1757 1818 * set:
1758 1819 <filteredset
1759 1820 <fullreposet+ 0:9>,
1760 1821 <merge>>
1761 1822 6
1762 1823
1763 1824 $ HGPLAIN=1
1764 1825 $ export HGPLAIN
1765 1826 $ try m
1766 1827 ('symbol', 'm')
1767 1828 abort: unknown revision 'm'!
1768 1829 [255]
1769 1830
1770 1831 $ HGPLAINEXCEPT=revsetalias
1771 1832 $ export HGPLAINEXCEPT
1772 1833 $ try m
1773 1834 ('symbol', 'm')
1774 1835 * expanded:
1775 1836 (func
1776 1837 ('symbol', 'merge')
1777 1838 None)
1778 1839 * set:
1779 1840 <filteredset
1780 1841 <fullreposet+ 0:9>,
1781 1842 <merge>>
1782 1843 6
1783 1844
1784 1845 $ unset HGPLAIN
1785 1846 $ unset HGPLAINEXCEPT
1786 1847
1787 1848 $ try 'p2(.)'
1788 1849 (func
1789 1850 ('symbol', 'p2')
1790 1851 ('symbol', '.'))
1791 1852 * expanded:
1792 1853 (func
1793 1854 ('symbol', 'p1')
1794 1855 ('symbol', '.'))
1795 1856 * set:
1796 1857 <baseset+ [8]>
1797 1858 8
1798 1859
1799 1860 $ HGPLAIN=1
1800 1861 $ export HGPLAIN
1801 1862 $ try 'p2(.)'
1802 1863 (func
1803 1864 ('symbol', 'p2')
1804 1865 ('symbol', '.'))
1805 1866 * set:
1806 1867 <baseset+ []>
1807 1868
1808 1869 $ HGPLAINEXCEPT=revsetalias
1809 1870 $ export HGPLAINEXCEPT
1810 1871 $ try 'p2(.)'
1811 1872 (func
1812 1873 ('symbol', 'p2')
1813 1874 ('symbol', '.'))
1814 1875 * expanded:
1815 1876 (func
1816 1877 ('symbol', 'p1')
1817 1878 ('symbol', '.'))
1818 1879 * set:
1819 1880 <baseset+ [8]>
1820 1881 8
1821 1882
1822 1883 $ unset HGPLAIN
1823 1884 $ unset HGPLAINEXCEPT
1824 1885
1825 1886 test alias recursion
1826 1887
1827 1888 $ try sincem
1828 1889 ('symbol', 'sincem')
1829 1890 * expanded:
1830 1891 (func
1831 1892 ('symbol', 'descendants')
1832 1893 (func
1833 1894 ('symbol', 'merge')
1834 1895 None))
1835 1896 * set:
1836 1897 <addset+
1837 1898 <filteredset
1838 1899 <fullreposet+ 0:9>,
1839 1900 <merge>>,
1840 1901 <generatorset+>>
1841 1902 6
1842 1903 7
1843 1904
1844 1905 test infinite recursion
1845 1906
1846 1907 $ echo 'recurse1 = recurse2' >> .hg/hgrc
1847 1908 $ echo 'recurse2 = recurse1' >> .hg/hgrc
1848 1909 $ try recurse1
1849 1910 ('symbol', 'recurse1')
1850 1911 hg: parse error: infinite expansion of revset alias "recurse1" detected
1851 1912 [255]
1852 1913
1853 1914 $ echo 'level1($1, $2) = $1 or $2' >> .hg/hgrc
1854 1915 $ echo 'level2($1, $2) = level1($2, $1)' >> .hg/hgrc
1855 1916 $ try "level2(level1(1, 2), 3)"
1856 1917 (func
1857 1918 ('symbol', 'level2')
1858 1919 (list
1859 1920 (func
1860 1921 ('symbol', 'level1')
1861 1922 (list
1862 1923 ('symbol', '1')
1863 1924 ('symbol', '2')))
1864 1925 ('symbol', '3')))
1865 1926 * expanded:
1866 1927 (or
1867 1928 ('symbol', '3')
1868 1929 (or
1869 1930 ('symbol', '1')
1870 1931 ('symbol', '2')))
1871 1932 * set:
1872 1933 <addset
1873 1934 <baseset [3]>,
1874 1935 <baseset [1, 2]>>
1875 1936 3
1876 1937 1
1877 1938 2
1878 1939
1879 1940 test nesting and variable passing
1880 1941
1881 1942 $ echo 'nested($1) = nested2($1)' >> .hg/hgrc
1882 1943 $ echo 'nested2($1) = nested3($1)' >> .hg/hgrc
1883 1944 $ echo 'nested3($1) = max($1)' >> .hg/hgrc
1884 1945 $ try 'nested(2:5)'
1885 1946 (func
1886 1947 ('symbol', 'nested')
1887 1948 (range
1888 1949 ('symbol', '2')
1889 1950 ('symbol', '5')))
1890 1951 * expanded:
1891 1952 (func
1892 1953 ('symbol', 'max')
1893 1954 (range
1894 1955 ('symbol', '2')
1895 1956 ('symbol', '5')))
1896 1957 * set:
1897 1958 <baseset
1898 1959 <max
1899 1960 <fullreposet+ 0:9>,
1900 1961 <spanset+ 2:5>>>
1901 1962 5
1902 1963
1903 1964 test chained `or` operations are flattened at parsing phase
1904 1965
1905 1966 $ echo 'chainedorops($1, $2, $3) = $1|$2|$3' >> .hg/hgrc
1906 1967 $ try 'chainedorops(0:1, 1:2, 2:3)'
1907 1968 (func
1908 1969 ('symbol', 'chainedorops')
1909 1970 (list
1910 1971 (range
1911 1972 ('symbol', '0')
1912 1973 ('symbol', '1'))
1913 1974 (range
1914 1975 ('symbol', '1')
1915 1976 ('symbol', '2'))
1916 1977 (range
1917 1978 ('symbol', '2')
1918 1979 ('symbol', '3'))))
1919 1980 * expanded:
1920 1981 (or
1921 1982 (range
1922 1983 ('symbol', '0')
1923 1984 ('symbol', '1'))
1924 1985 (range
1925 1986 ('symbol', '1')
1926 1987 ('symbol', '2'))
1927 1988 (range
1928 1989 ('symbol', '2')
1929 1990 ('symbol', '3')))
1930 1991 * set:
1931 1992 <addset
1932 1993 <spanset+ 0:1>,
1933 1994 <addset
1934 1995 <spanset+ 1:2>,
1935 1996 <spanset+ 2:3>>>
1936 1997 0
1937 1998 1
1938 1999 2
1939 2000 3
1940 2001
1941 2002 test variable isolation, variable placeholders are rewritten as string
1942 2003 then parsed and matched again as string. Check they do not leak too
1943 2004 far away.
1944 2005
1945 2006 $ echo 'injectparamasstring = max("$1")' >> .hg/hgrc
1946 2007 $ echo 'callinjection($1) = descendants(injectparamasstring)' >> .hg/hgrc
1947 2008 $ try 'callinjection(2:5)'
1948 2009 (func
1949 2010 ('symbol', 'callinjection')
1950 2011 (range
1951 2012 ('symbol', '2')
1952 2013 ('symbol', '5')))
1953 2014 * expanded:
1954 2015 (func
1955 2016 ('symbol', 'descendants')
1956 2017 (func
1957 2018 ('symbol', 'max')
1958 2019 ('string', '$1')))
1959 2020 abort: unknown revision '$1'!
1960 2021 [255]
1961 2022
1962 2023 test scope of alias expansion: 'universe' is expanded prior to 'shadowall(0)',
1963 2024 but 'all()' should never be substituted to '0()'.
1964 2025
1965 2026 $ echo 'universe = all()' >> .hg/hgrc
1966 2027 $ echo 'shadowall(all) = all and universe' >> .hg/hgrc
1967 2028 $ try 'shadowall(0)'
1968 2029 (func
1969 2030 ('symbol', 'shadowall')
1970 2031 ('symbol', '0'))
1971 2032 * expanded:
1972 2033 (and
1973 2034 ('symbol', '0')
1974 2035 (func
1975 2036 ('symbol', 'all')
1976 2037 None))
1977 2038 * set:
1978 2039 <filteredset
1979 2040 <baseset [0]>,
1980 2041 <spanset+ 0:9>>
1981 2042 0
1982 2043
1983 2044 test unknown reference:
1984 2045
1985 2046 $ try "unknownref(0)" --config 'revsetalias.unknownref($1)=$1:$2'
1986 2047 (func
1987 2048 ('symbol', 'unknownref')
1988 2049 ('symbol', '0'))
1989 2050 abort: bad definition of revset alias "unknownref": invalid symbol '$2'
1990 2051 [255]
1991 2052
1992 2053 $ hg debugrevspec --debug --config revsetalias.anotherbadone='branch(' "tip"
1993 2054 ('symbol', 'tip')
1994 2055 warning: bad definition of revset alias "anotherbadone": at 7: not a prefix: end
1995 2056 * set:
1996 2057 <baseset [9]>
1997 2058 9
1998 2059
1999 2060 $ try 'tip'
2000 2061 ('symbol', 'tip')
2001 2062 * set:
2002 2063 <baseset [9]>
2003 2064 9
2004 2065
2005 2066 $ hg debugrevspec --debug --config revsetalias.'bad name'='tip' "tip"
2006 2067 ('symbol', 'tip')
2007 2068 warning: bad declaration of revset alias "bad name": at 4: invalid token
2008 2069 * set:
2009 2070 <baseset [9]>
2010 2071 9
2011 2072 $ echo 'strictreplacing($1, $10) = $10 or desc("$1")' >> .hg/hgrc
2012 2073 $ try 'strictreplacing("foo", tip)'
2013 2074 (func
2014 2075 ('symbol', 'strictreplacing')
2015 2076 (list
2016 2077 ('string', 'foo')
2017 2078 ('symbol', 'tip')))
2018 2079 * expanded:
2019 2080 (or
2020 2081 ('symbol', 'tip')
2021 2082 (func
2022 2083 ('symbol', 'desc')
2023 2084 ('string', '$1')))
2024 2085 * set:
2025 2086 <addset
2026 2087 <baseset [9]>,
2027 2088 <filteredset
2028 2089 <fullreposet+ 0:9>,
2029 2090 <desc '$1'>>>
2030 2091 9
2031 2092
2032 2093 $ try 'd(2:5)'
2033 2094 (func
2034 2095 ('symbol', 'd')
2035 2096 (range
2036 2097 ('symbol', '2')
2037 2098 ('symbol', '5')))
2038 2099 * expanded:
2039 2100 (func
2040 2101 ('symbol', 'reverse')
2041 2102 (func
2042 2103 ('symbol', 'sort')
2043 2104 (list
2044 2105 (range
2045 2106 ('symbol', '2')
2046 2107 ('symbol', '5'))
2047 2108 ('symbol', 'date'))))
2048 2109 * set:
2049 2110 <baseset [4, 5, 3, 2]>
2050 2111 4
2051 2112 5
2052 2113 3
2053 2114 2
2054 2115 $ try 'rs(2 or 3, date)'
2055 2116 (func
2056 2117 ('symbol', 'rs')
2057 2118 (list
2058 2119 (or
2059 2120 ('symbol', '2')
2060 2121 ('symbol', '3'))
2061 2122 ('symbol', 'date')))
2062 2123 * expanded:
2063 2124 (func
2064 2125 ('symbol', 'reverse')
2065 2126 (func
2066 2127 ('symbol', 'sort')
2067 2128 (list
2068 2129 (or
2069 2130 ('symbol', '2')
2070 2131 ('symbol', '3'))
2071 2132 ('symbol', 'date'))))
2072 2133 * set:
2073 2134 <baseset [3, 2]>
2074 2135 3
2075 2136 2
2076 2137 $ try 'rs()'
2077 2138 (func
2078 2139 ('symbol', 'rs')
2079 2140 None)
2080 2141 hg: parse error: invalid number of arguments: 0
2081 2142 [255]
2082 2143 $ try 'rs(2)'
2083 2144 (func
2084 2145 ('symbol', 'rs')
2085 2146 ('symbol', '2'))
2086 2147 hg: parse error: invalid number of arguments: 1
2087 2148 [255]
2088 2149 $ try 'rs(2, data, 7)'
2089 2150 (func
2090 2151 ('symbol', 'rs')
2091 2152 (list
2092 2153 ('symbol', '2')
2093 2154 ('symbol', 'data')
2094 2155 ('symbol', '7')))
2095 2156 hg: parse error: invalid number of arguments: 3
2096 2157 [255]
2097 2158 $ try 'rs4(2 or 3, x, x, date)'
2098 2159 (func
2099 2160 ('symbol', 'rs4')
2100 2161 (list
2101 2162 (or
2102 2163 ('symbol', '2')
2103 2164 ('symbol', '3'))
2104 2165 ('symbol', 'x')
2105 2166 ('symbol', 'x')
2106 2167 ('symbol', 'date')))
2107 2168 * expanded:
2108 2169 (func
2109 2170 ('symbol', 'reverse')
2110 2171 (func
2111 2172 ('symbol', 'sort')
2112 2173 (list
2113 2174 (or
2114 2175 ('symbol', '2')
2115 2176 ('symbol', '3'))
2116 2177 ('symbol', 'date'))))
2117 2178 * set:
2118 2179 <baseset [3, 2]>
2119 2180 3
2120 2181 2
2121 2182
2122 2183 issue4553: check that revset aliases override existing hash prefix
2123 2184
2124 2185 $ hg log -qr e
2125 2186 6:e0cc66ef77e8
2126 2187
2127 2188 $ hg log -qr e --config revsetalias.e="all()"
2128 2189 0:2785f51eece5
2129 2190 1:d75937da8da0
2130 2191 2:5ed5505e9f1c
2131 2192 3:8528aa5637f2
2132 2193 4:2326846efdab
2133 2194 5:904fa392b941
2134 2195 6:e0cc66ef77e8
2135 2196 7:013af1973af4
2136 2197 8:d5d0dcbdc4d9
2137 2198 9:24286f4ae135
2138 2199
2139 2200 $ hg log -qr e: --config revsetalias.e="0"
2140 2201 0:2785f51eece5
2141 2202 1:d75937da8da0
2142 2203 2:5ed5505e9f1c
2143 2204 3:8528aa5637f2
2144 2205 4:2326846efdab
2145 2206 5:904fa392b941
2146 2207 6:e0cc66ef77e8
2147 2208 7:013af1973af4
2148 2209 8:d5d0dcbdc4d9
2149 2210 9:24286f4ae135
2150 2211
2151 2212 $ hg log -qr :e --config revsetalias.e="9"
2152 2213 0:2785f51eece5
2153 2214 1:d75937da8da0
2154 2215 2:5ed5505e9f1c
2155 2216 3:8528aa5637f2
2156 2217 4:2326846efdab
2157 2218 5:904fa392b941
2158 2219 6:e0cc66ef77e8
2159 2220 7:013af1973af4
2160 2221 8:d5d0dcbdc4d9
2161 2222 9:24286f4ae135
2162 2223
2163 2224 $ hg log -qr e:
2164 2225 6:e0cc66ef77e8
2165 2226 7:013af1973af4
2166 2227 8:d5d0dcbdc4d9
2167 2228 9:24286f4ae135
2168 2229
2169 2230 $ hg log -qr :e
2170 2231 0:2785f51eece5
2171 2232 1:d75937da8da0
2172 2233 2:5ed5505e9f1c
2173 2234 3:8528aa5637f2
2174 2235 4:2326846efdab
2175 2236 5:904fa392b941
2176 2237 6:e0cc66ef77e8
2177 2238
2178 2239 issue2549 - correct optimizations
2179 2240
2180 2241 $ try 'limit(1 or 2 or 3, 2) and not 2'
2181 2242 (and
2182 2243 (func
2183 2244 ('symbol', 'limit')
2184 2245 (list
2185 2246 (or
2186 2247 ('symbol', '1')
2187 2248 ('symbol', '2')
2188 2249 ('symbol', '3'))
2189 2250 ('symbol', '2')))
2190 2251 (not
2191 2252 ('symbol', '2')))
2192 2253 * set:
2193 2254 <filteredset
2194 2255 <baseset
2195 2256 <limit n=2, offset=0,
2196 2257 <fullreposet+ 0:9>,
2197 2258 <baseset [1, 2, 3]>>>,
2198 2259 <not
2199 2260 <baseset [2]>>>
2200 2261 1
2201 2262 $ try 'max(1 or 2) and not 2'
2202 2263 (and
2203 2264 (func
2204 2265 ('symbol', 'max')
2205 2266 (or
2206 2267 ('symbol', '1')
2207 2268 ('symbol', '2')))
2208 2269 (not
2209 2270 ('symbol', '2')))
2210 2271 * set:
2211 2272 <filteredset
2212 2273 <baseset
2213 2274 <max
2214 2275 <fullreposet+ 0:9>,
2215 2276 <baseset [1, 2]>>>,
2216 2277 <not
2217 2278 <baseset [2]>>>
2218 2279 $ try 'min(1 or 2) and not 1'
2219 2280 (and
2220 2281 (func
2221 2282 ('symbol', 'min')
2222 2283 (or
2223 2284 ('symbol', '1')
2224 2285 ('symbol', '2')))
2225 2286 (not
2226 2287 ('symbol', '1')))
2227 2288 * set:
2228 2289 <filteredset
2229 2290 <baseset
2230 2291 <min
2231 2292 <fullreposet+ 0:9>,
2232 2293 <baseset [1, 2]>>>,
2233 2294 <not
2234 2295 <baseset [1]>>>
2235 2296 $ try 'last(1 or 2, 1) and not 2'
2236 2297 (and
2237 2298 (func
2238 2299 ('symbol', 'last')
2239 2300 (list
2240 2301 (or
2241 2302 ('symbol', '1')
2242 2303 ('symbol', '2'))
2243 2304 ('symbol', '1')))
2244 2305 (not
2245 2306 ('symbol', '2')))
2246 2307 * set:
2247 2308 <filteredset
2248 2309 <baseset
2249 2310 <last n=1,
2250 2311 <fullreposet+ 0:9>,
2251 2312 <baseset [2, 1]>>>,
2252 2313 <not
2253 2314 <baseset [2]>>>
2254 2315
2255 2316 issue4289 - ordering of built-ins
2256 2317 $ hg log -M -q -r 3:2
2257 2318 3:8528aa5637f2
2258 2319 2:5ed5505e9f1c
2259 2320
2260 2321 test revsets started with 40-chars hash (issue3669)
2261 2322
2262 2323 $ ISSUE3669_TIP=`hg tip --template '{node}'`
2263 2324 $ hg log -r "${ISSUE3669_TIP}" --template '{rev}\n'
2264 2325 9
2265 2326 $ hg log -r "${ISSUE3669_TIP}^" --template '{rev}\n'
2266 2327 8
2267 2328
2268 2329 test or-ed indirect predicates (issue3775)
2269 2330
2270 2331 $ log '6 or 6^1' | sort
2271 2332 5
2272 2333 6
2273 2334 $ log '6^1 or 6' | sort
2274 2335 5
2275 2336 6
2276 2337 $ log '4 or 4~1' | sort
2277 2338 2
2278 2339 4
2279 2340 $ log '4~1 or 4' | sort
2280 2341 2
2281 2342 4
2282 2343 $ log '(0 or 2):(4 or 6) or 0 or 6' | sort
2283 2344 0
2284 2345 1
2285 2346 2
2286 2347 3
2287 2348 4
2288 2349 5
2289 2350 6
2290 2351 $ log '0 or 6 or (0 or 2):(4 or 6)' | sort
2291 2352 0
2292 2353 1
2293 2354 2
2294 2355 3
2295 2356 4
2296 2357 5
2297 2358 6
2298 2359
2299 2360 tests for 'remote()' predicate:
2300 2361 #. (csets in remote) (id) (remote)
2301 2362 1. less than local current branch "default"
2302 2363 2. same with local specified "default"
2303 2364 3. more than local specified specified
2304 2365
2305 2366 $ hg clone --quiet -U . ../remote3
2306 2367 $ cd ../remote3
2307 2368 $ hg update -q 7
2308 2369 $ echo r > r
2309 2370 $ hg ci -Aqm 10
2310 2371 $ log 'remote()'
2311 2372 7
2312 2373 $ log 'remote("a-b-c-")'
2313 2374 2
2314 2375 $ cd ../repo
2315 2376 $ log 'remote(".a.b.c.", "../remote3")'
2316 2377
2317 2378 tests for concatenation of strings/symbols by "##"
2318 2379
2319 2380 $ try "278 ## '5f5' ## 1ee ## 'ce5'"
2320 2381 (_concat
2321 2382 (_concat
2322 2383 (_concat
2323 2384 ('symbol', '278')
2324 2385 ('string', '5f5'))
2325 2386 ('symbol', '1ee'))
2326 2387 ('string', 'ce5'))
2327 2388 * concatenated:
2328 2389 ('string', '2785f51eece5')
2329 2390 * set:
2330 2391 <baseset [0]>
2331 2392 0
2332 2393
2333 2394 $ echo 'cat4($1, $2, $3, $4) = $1 ## $2 ## $3 ## $4' >> .hg/hgrc
2334 2395 $ try "cat4(278, '5f5', 1ee, 'ce5')"
2335 2396 (func
2336 2397 ('symbol', 'cat4')
2337 2398 (list
2338 2399 ('symbol', '278')
2339 2400 ('string', '5f5')
2340 2401 ('symbol', '1ee')
2341 2402 ('string', 'ce5')))
2342 2403 * expanded:
2343 2404 (_concat
2344 2405 (_concat
2345 2406 (_concat
2346 2407 ('symbol', '278')
2347 2408 ('string', '5f5'))
2348 2409 ('symbol', '1ee'))
2349 2410 ('string', 'ce5'))
2350 2411 * concatenated:
2351 2412 ('string', '2785f51eece5')
2352 2413 * set:
2353 2414 <baseset [0]>
2354 2415 0
2355 2416
2356 2417 (check concatenation in alias nesting)
2357 2418
2358 2419 $ echo 'cat2($1, $2) = $1 ## $2' >> .hg/hgrc
2359 2420 $ echo 'cat2x2($1, $2, $3, $4) = cat2($1 ## $2, $3 ## $4)' >> .hg/hgrc
2360 2421 $ log "cat2x2(278, '5f5', 1ee, 'ce5')"
2361 2422 0
2362 2423
2363 2424 (check operator priority)
2364 2425
2365 2426 $ echo 'cat2n2($1, $2, $3, $4) = $1 ## $2 or $3 ## $4~2' >> .hg/hgrc
2366 2427 $ log "cat2n2(2785f5, 1eece5, 24286f, 4ae135)"
2367 2428 0
2368 2429 4
2369 2430
2370 2431 $ cd ..
2371 2432
2372 2433 prepare repository that has "default" branches of multiple roots
2373 2434
2374 2435 $ hg init namedbranch
2375 2436 $ cd namedbranch
2376 2437
2377 2438 $ echo default0 >> a
2378 2439 $ hg ci -Aqm0
2379 2440 $ echo default1 >> a
2380 2441 $ hg ci -m1
2381 2442
2382 2443 $ hg branch -q stable
2383 2444 $ echo stable2 >> a
2384 2445 $ hg ci -m2
2385 2446 $ echo stable3 >> a
2386 2447 $ hg ci -m3
2387 2448
2388 2449 $ hg update -q null
2389 2450 $ echo default4 >> a
2390 2451 $ hg ci -Aqm4
2391 2452 $ echo default5 >> a
2392 2453 $ hg ci -m5
2393 2454
2394 2455 "null" revision belongs to "default" branch (issue4683)
2395 2456
2396 2457 $ log 'branch(null)'
2397 2458 0
2398 2459 1
2399 2460 4
2400 2461 5
2401 2462
2402 2463 "null" revision belongs to "default" branch, but it shouldn't appear in set
2403 2464 unless explicitly specified (issue4682)
2404 2465
2405 2466 $ log 'children(branch(default))'
2406 2467 1
2407 2468 2
2408 2469 5
2409 2470
2410 2471 $ cd ..
2411 2472
2412 2473 test author/desc/keyword in problematic encoding
2413 2474 # unicode: cp932:
2414 2475 # u30A2 0x83 0x41(= 'A')
2415 2476 # u30C2 0x83 0x61(= 'a')
2416 2477
2417 2478 $ hg init problematicencoding
2418 2479 $ cd problematicencoding
2419 2480
2420 2481 $ python > setup.sh <<EOF
2421 2482 > print u'''
2422 2483 > echo a > text
2423 2484 > hg add text
2424 2485 > hg --encoding utf-8 commit -u '\u30A2' -m none
2425 2486 > echo b > text
2426 2487 > hg --encoding utf-8 commit -u '\u30C2' -m none
2427 2488 > echo c > text
2428 2489 > hg --encoding utf-8 commit -u none -m '\u30A2'
2429 2490 > echo d > text
2430 2491 > hg --encoding utf-8 commit -u none -m '\u30C2'
2431 2492 > '''.encode('utf-8')
2432 2493 > EOF
2433 2494 $ sh < setup.sh
2434 2495
2435 2496 test in problematic encoding
2436 2497 $ python > test.sh <<EOF
2437 2498 > print u'''
2438 2499 > hg --encoding cp932 log --template '{rev}\\n' -r 'author(\u30A2)'
2439 2500 > echo ====
2440 2501 > hg --encoding cp932 log --template '{rev}\\n' -r 'author(\u30C2)'
2441 2502 > echo ====
2442 2503 > hg --encoding cp932 log --template '{rev}\\n' -r 'desc(\u30A2)'
2443 2504 > echo ====
2444 2505 > hg --encoding cp932 log --template '{rev}\\n' -r 'desc(\u30C2)'
2445 2506 > echo ====
2446 2507 > hg --encoding cp932 log --template '{rev}\\n' -r 'keyword(\u30A2)'
2447 2508 > echo ====
2448 2509 > hg --encoding cp932 log --template '{rev}\\n' -r 'keyword(\u30C2)'
2449 2510 > '''.encode('cp932')
2450 2511 > EOF
2451 2512 $ sh < test.sh
2452 2513 0
2453 2514 ====
2454 2515 1
2455 2516 ====
2456 2517 2
2457 2518 ====
2458 2519 3
2459 2520 ====
2460 2521 0
2461 2522 2
2462 2523 ====
2463 2524 1
2464 2525 3
2465 2526
2466 2527 test error message of bad revset
2467 2528 $ hg log -r 'foo\\'
2468 2529 hg: parse error at 3: syntax error in revset 'foo\\'
2469 2530 [255]
2470 2531
2471 2532 $ cd ..
2472 2533
2473 2534 Test that revset predicate of extension isn't loaded at failure of
2474 2535 loading it
2475 2536
2476 2537 $ cd repo
2477 2538
2478 2539 $ cat <<EOF > $TESTTMP/custompredicate.py
2479 2540 > from mercurial import error, registrar, revset
2480 2541 >
2481 2542 > revsetpredicate = registrar.revsetpredicate()
2482 2543 >
2483 2544 > @revsetpredicate('custom1()')
2484 2545 > def custom1(repo, subset, x):
2485 2546 > return revset.baseset([1])
2486 2547 >
2487 2548 > raise error.Abort('intentional failure of loading extension')
2488 2549 > EOF
2489 2550 $ cat <<EOF > .hg/hgrc
2490 2551 > [extensions]
2491 2552 > custompredicate = $TESTTMP/custompredicate.py
2492 2553 > EOF
2493 2554
2494 2555 $ hg debugrevspec "custom1()"
2495 2556 *** failed to import extension custompredicate from $TESTTMP/custompredicate.py: intentional failure of loading extension
2496 2557 hg: parse error: unknown identifier: custom1
2497 2558 [255]
2498 2559
2499 2560 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now